Revision da4a52a3 lib/cmdlib/instance.py

--- a/lib/cmdlib/instance.py
+++ b/lib/cmdlib/instance.py
@@ -48,7 +48,7 @@
   ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
   LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
   IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
-  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
+  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
   ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName
 from ganeti.cmdlib.instance_storage import CreateDisks, \
   CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
@@ -392,10 +392,10 @@
 
     # instance name verification
     if self.op.name_check:
-      self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
-      self.op.instance_name = self.hostname1.name
+      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
+      self.op.instance_name = self.hostname.name
       # used in CheckPrereq for ip ping check
-      self.check_ip = self.hostname1.ip
+      self.check_ip = self.hostname.ip
     else:
       self.check_ip = None
 
@@ -503,7 +503,8 @@
 
     # this is just a preventive check, but someone might still add this
     # instance in the meantime, and creation will fail at lock-add time
-    if self.op.instance_name in self.cfg.GetInstanceList():
+    if self.op.instance_name in\
+      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
       raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                  self.op.instance_name, errors.ECODE_EXISTS)
 
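With the configuration now keyed by instance UUID, a plain name-membership test has to walk the instance objects and compare their name attribute. Below is a minimal standalone sketch of that lookup pattern; FakeInstance and FakeConfig are invented stand-ins, and only the GetAllInstancesInfo() call shape and the .name/.uuid attributes mirror the hunk above.

    # Stand-in objects for illustration only; just the GetAllInstancesInfo()
    # call shape and the .name/.uuid attributes mirror the hunk above.
    class FakeInstance:
      def __init__(self, name, uuid):
        self.name = name
        self.uuid = uuid

    class FakeConfig:
      def __init__(self, instances):
        # the refactored config maps UUID -> instance object
        self._instances = dict((inst.uuid, inst) for inst in instances)

      def GetAllInstancesInfo(self):
        return dict(self._instances)

    cfg = FakeConfig([FakeInstance("web1.example.com", "9f2c6a1e-fake-uuid")])
    requested_name = "web1.example.com"
    if requested_name in [inst.name
                          for inst in cfg.GetAllInstancesInfo().values()]:
      print("Instance '%s' is already in the cluster" % requested_name)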
@@ -1188,13 +1189,15 @@
     else:
       network_port = None
 
+    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
+
     # This is ugly but we got a chicken-egg problem here
     # We can only take the group disk parameters, as the instance
     # has no disks yet (we are generating them right here).
     nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
     disks = GenerateDiskTemplate(self,
                                  self.op.disk_template,
-                                 self.op.instance_name, self.pnode.uuid,
+                                 instance_uuid, self.pnode.uuid,
                                  self.secondaries,
                                  self.disks,
                                  self.instance_file_storage_dir,
@@ -1203,7 +1206,9 @@
                                  feedback_fn,
                                  self.cfg.GetGroupDiskParams(nodegroup))
 
-    iobj = objects.Instance(name=self.op.instance_name, os=self.op.os_type,
+    iobj = objects.Instance(name=self.op.instance_name,
+                            uuid=instance_uuid,
+                            os=self.op.os_type,
                             primary_node=self.pnode.uuid,
                             nics=self.nics, disks=disks,
                             disk_template=self.op.disk_template,
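The creation path now reserves a UUID up front and threads it through both GenerateDiskTemplate and the Instance object, so the new configuration entry is keyed by UUID from the start. A rough standalone sketch of that ordering, using uuid.uuid4() as a stand-in for self.cfg.GenerateUniqueID(self.proc.GetECId()) and a plain class in place of objects.Instance:

    # Illustrative only: uuid.uuid4() stands in for the config's
    # GenerateUniqueID(), and SketchInstance stands in for objects.Instance.
    import uuid

    class SketchInstance:
      def __init__(self, name, uuid, os, primary_node, disks):
        self.name = name
        self.uuid = uuid
        self.os = os
        self.primary_node = primary_node
        self.disks = disks

    instance_uuid = str(uuid.uuid4())  # reserved before any disks exist
    disks = []                         # would come from GenerateDiskTemplate(...)
    iobj = SketchInstance(name="web1.example.com",
                          uuid=instance_uuid,
                          os="debootstrap",
                          primary_node="node-uuid-1",
                          disks=disks)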
@@ -1281,7 +1286,7 @@
 
     if disk_abort:
       RemoveDisks(self, iobj)
-      self.cfg.RemoveInstance(iobj.name)
+      self.cfg.RemoveInstance(iobj.uuid)
       # Make sure the instance lock gets removed
       self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
       raise errors.OpExecError("There are some degraded disks for"
@@ -1455,9 +1460,10 @@
     This checks that the instance is in the cluster and is not running.
 
     """
-    self.op.instance_name = ExpandInstanceName(self.cfg,
-                                               self.op.instance_name)
-    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    (self.op.instance_uuid, self.op.instance_name) = \
+      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
+                                self.op.instance_name)
+    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
     assert instance is not None
     CheckNodeOnline(self, instance.primary_node)
     CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
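CheckPrereq-style code now resolves both pieces of identity at once: ExpandInstanceUuidAndName takes the config, an optional expected UUID and the name, and returns the (uuid, name) pair, after which all config lookups go through the UUID. A hypothetical re-implementation of that lookup over a plain uuid-keyed dict follows; the helper itself is invented, and only its argument/return shape follows the call above.

    # Invented helper mirroring the (cfg, expected_uuid, name) -> (uuid, name)
    # shape of ExpandInstanceUuidAndName; instances_by_uuid is a plain dict of
    # uuid -> object carrying a .name attribute.
    def expand_instance_uuid_and_name(instances_by_uuid, expected_uuid, name):
      if expected_uuid is not None:
        # the caller already knows the UUID; just return the stored name
        return expected_uuid, instances_by_uuid[expected_uuid].name
      for inst_uuid, inst in instances_by_uuid.items():
        if inst.name == name:
          return inst_uuid, inst.name
      raise KeyError("Unknown instance %r" % name)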
@@ -1474,8 +1480,9 @@
                                    (hostname.ip, new_name),
                                    errors.ECODE_NOTUNIQUE)
 
-    instance_list = self.cfg.GetInstanceList()
-    if new_name in instance_list and new_name != instance.name:
+    instance_names = [inst.name for
+                      inst in self.cfg.GetAllInstancesInfo().values()]
+    if new_name in instance_names and new_name != instance.name:
       raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                  new_name, errors.ECODE_EXISTS)
 
@@ -1492,7 +1499,7 @@
                                self.instance.disks[0].logical_id[1])
       rename_file_storage = True
 
-    self.cfg.RenameInstance(self.instance.name, self.op.new_name)
+    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
     # Change the instance lock. This is definitely safe while we hold the BGL.
     # Otherwise the new lock would have to be added in acquired mode.
     assert self.REQ_BGL
@@ -1501,7 +1508,7 @@
     self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
 
     # re-read the instance from the configuration after rename
-    renamed_inst = self.cfg.GetInstanceInfo(self.op.new_name)
+    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
 
     if rename_file_storage:
       new_file_storage_dir = os.path.dirname(
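Re-reading the renamed instance by UUID rather than by its new name works because the UUID is the key that survives the rename. The tiny stand-in config below demonstrates why the UUID lookup stays valid across RenameInstance; TinyConfig and its contents are invented, and only the RenameInstance/GetInstanceInfo call shapes echo the hunk above.

    # Invented stand-in config: instances are stored under their UUID, the
    # name is just an attribute that the rename rewrites in place.
    class TinyConfig:
      def __init__(self):
        self._by_uuid = {"inst-uuid-1": {"name": "old.example.com"}}

      def RenameInstance(self, inst_uuid, new_name):
        self._by_uuid[inst_uuid]["name"] = new_name

      def GetInstanceInfo(self, inst_uuid):
        return self._by_uuid.get(inst_uuid)

    cfg = TinyConfig()
    cfg.RenameInstance("inst-uuid-1", "new.example.com")
    renamed = cfg.GetInstanceInfo("inst-uuid-1")  # found via the stable UUID
    assert renamed["name"] == "new.example.com"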
@@ -1584,7 +1591,7 @@
     This checks that the instance is in the cluster.
 
     """
-    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
 
@@ -1670,7 +1677,7 @@
     This checks that the instance is in the cluster.
 
     """
-    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
 
@@ -1752,7 +1759,7 @@
       CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
     except errors.OpExecError:
       self.LogWarning("Device creation failed")
-      self.cfg.ReleaseDRBDMinors(self.instance.name)
+      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
       raise
 
     cluster_name = self.cfg.GetClusterInfo().cluster_name
@@ -1785,7 +1792,7 @@
       try:
         RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
       finally:
-        self.cfg.ReleaseDRBDMinors(self.instance.name)
+        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
         raise errors.OpExecError("Errors during disk copy: %s" %
                                  (",".join(errs),))
 
@@ -2376,7 +2383,7 @@
       # Acquire locks for the instance's nodegroups optimistically. Needs
       # to be verified in CheckPrereq
       self.needed_locks[locking.LEVEL_NODEGROUP] = \
-        self.cfg.GetInstanceNodeGroups(self.op.instance_name)
+        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
     elif level == locking.LEVEL_NODE:
       self._LockInstancesNodes()
       if self.op.disk_template and self.op.remote_node:
@@ -2714,7 +2721,7 @@
 
     """
     assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
-    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
     self.cluster = self.cfg.GetClusterInfo()
 
     assert self.instance is not None, \
@@ -3031,7 +3038,7 @@
                   constants.IDISK_NAME: d.name}
                  for d in self.instance.disks]
     new_disks = GenerateDiskTemplate(self, self.op.disk_template,
-                                     self.instance.name, pnode_uuid,
+                                     self.instance.uuid, pnode_uuid,
                                      [snode_uuid], disk_info, None, None, 0,
                                      feedback_fn, self.diskparams)
     anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
@@ -3160,7 +3167,7 @@
 
     disk = \
       GenerateDiskTemplate(self, self.instance.disk_template,
-                           self.instance.name, self.instance.primary_node,
+                           self.instance.uuid, self.instance.primary_node,
                            self.instance.secondary_nodes, [params], file_path,
                            file_driver, idx, self.Log, self.diskparams)[0]
 
@@ -3314,7 +3321,7 @@
       try:
         self._DISK_CONVERSIONS[mode](self, feedback_fn)
       except:
-        self.cfg.ReleaseDRBDMinors(self.instance.name)
+        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
         raise
       result.append(("disk_template", self.op.disk_template))
 
@@ -3359,11 +3366,11 @@
       pass
     elif self.op.offline:
       # Mark instance as offline
-      self.cfg.MarkInstanceOffline(self.instance.name)
+      self.cfg.MarkInstanceOffline(self.instance.uuid)
       result.append(("admin_state", constants.ADMINST_OFFLINE))
     else:
       # Mark instance as online, but stopped
-      self.cfg.MarkInstanceDown(self.instance.name)
+      self.cfg.MarkInstanceDown(self.instance.uuid)
       result.append(("admin_state", constants.ADMINST_DOWN))
 
     self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())
@@ -3413,7 +3420,7 @@
 
         # Lock all groups used by instance optimistically; this requires going
         # via the node before it's locked, requiring verification later on
-        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
+        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
         lock_groups.update(instance_groups)
       else:
         # No target groups, need to lock all of them
@@ -3429,7 +3436,7 @@
 
         # Lock all nodes in all potential target groups
         lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
-                       self.cfg.GetInstanceNodeGroups(self.op.instance_name))
+                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
         member_nodes = [node_uuid
                         for group in lock_groups
                         for node_uuid in self.cfg.GetNodeGroup(group).members]
@@ -3439,23 +3446,23 @@
         self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
 
   def CheckPrereq(self):
-    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
     owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
     owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
 
     assert (self.req_target_uuids is None or
             owned_groups.issuperset(self.req_target_uuids))
-    assert owned_instances == set([self.op.instance_name])
+    assert owned_instance_names == set([self.op.instance_name])
 
     # Get instance information
-    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
 
     # Check if node groups for locked instance are still correct
     assert owned_nodes.issuperset(self.instance.all_nodes), \
       ("Instance %s's nodes changed while we kept the lock" %
        self.op.instance_name)
 
-    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
+    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                           owned_groups)
 
     if self.req_target_uuids:
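Note the split that CheckPrereq now has to live with: the LEVEL_INSTANCE locks are still held under the instance name, while every config lookup goes through the UUID. A small sketch of the two keys coexisting; all values below are made up, and only the ownership assertion and the uuid-keyed lookup mirror the code above.

    # Made-up values illustrating the name/UUID split described above.
    op_instance_name = "web1.example.com"
    op_instance_uuid = "11111111-2222-3333-4444-555555555555"

    # locks are tracked by name ...
    owned_instance_names = frozenset([op_instance_name])
    assert owned_instance_names == set([op_instance_name])

    # ... while the configuration is indexed by UUID
    instances_by_uuid = {op_instance_uuid: {"name": op_instance_name}}
    instance = instances_by_uuid[op_instance_uuid]
    assert instance["name"] == op_instance_name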
