Revision a57e502a lib/cmdlib/instance_storage.py

--- a/lib/cmdlib/instance_storage.py
+++ b/lib/cmdlib/instance_storage.py
   @param excl_stor: Whether exclusive_storage is active for the node

   """
-  lu.cfg.SetDiskID(device, node_uuid)
   result = lu.rpc.call_blockdev_create(node_uuid, (device, instance),
                                        device.size, instance.name, force_open,
                                        info, excl_stor)
......
                " node %s for instance %s" % (device,
                                              lu.cfg.GetNodeName(node_uuid),
                                              instance.name))
-  if device.physical_id is None:
-    device.physical_id = result.payload


 def _CreateBlockDevInner(lu, node_uuid, instance, device, force_create,
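Every hunk in this revision follows the pattern visible above: the per-node lu.cfg.SetDiskID(...) priming call is dropped and nothing is written back into device.physical_id, while the RPC itself keeps receiving the (device, instance) pair it already took. A minimal sketch of the resulting call shape, assuming a helper with the hypothetical name create_single_block_dev (the real function name and the full error message are not shown in the hunk, and error handling is shortened):

    # Sketch only: hypothetical helper name, shortened error message.
    # After this revision the disk is identified by the (device, instance)
    # pair handed to the RPC layer; there is no SetDiskID() priming call and
    # no physical_id written back from result.payload.
    def create_single_block_dev(lu, node_uuid, instance, device,
                                info, force_open, excl_stor):
      result = lu.rpc.call_blockdev_create(node_uuid, (device, instance),
                                           device.size, instance.name,
                                           force_open, info, excl_stor)
      result.Raise("Can't create block device %s on node %s" %
                   (device, lu.cfg.GetNodeName(node_uuid)))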
......

   """
   for (node_uuid, disk) in disks_created:
-    lu.cfg.SetDiskID(disk, node_uuid)
     result = lu.rpc.call_blockdev_remove(node_uuid, (disk, instance))
     result.Warn("Failed to remove newly-created disk %s on node %s" %
                 (disk, lu.cfg.GetNodeName(node_uuid)), logging.warning)
......
     disks = [(idx, disk, 0)
              for (idx, disk) in enumerate(instance.disks)]

-  for (_, device, _) in disks:
-    lu.cfg.SetDiskID(device, node_uuid)
-
   logging.info("Pausing synchronization of disks of instance '%s'",
                instance.name)
   result = lu.rpc.call_blockdev_pause_resume_sync(node_uuid,
......
   node_uuid = instance.primary_node
   node_name = lu.cfg.GetNodeName(node_uuid)

-  for dev in disks:
-    lu.cfg.SetDiskID(dev, node_uuid)
-
   # TODO: Convert to utils.Retry

   retries = 0
......

   for disk in disks:
     for node_uuid, top_disk in disk.ComputeNodeTree(instance.primary_node):
-      lu.cfg.SetDiskID(top_disk, node_uuid)
       result = lu.rpc.call_blockdev_shutdown(node_uuid, (top_disk, instance))
       msg = result.fail_msg
       if msg:
......
       if ignore_size:
         node_disk = node_disk.Copy()
         node_disk.UnsetSize()
-      lu.cfg.SetDiskID(node_disk, node_uuid)
       result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
                                              instance.name, False, idx)
       msg = result.fail_msg
......
       if ignore_size:
         node_disk = node_disk.Copy()
         node_disk.UnsetSize()
-      lu.cfg.SetDiskID(node_disk, node_uuid)
       result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
                                              instance.name, True, idx)
       msg = result.fail_msg
......
     device_info.append((lu.cfg.GetNodeName(instance.primary_node),
                         inst_disk.iv_name, dev_path))

-  # leave the disks configured for the primary node
-  # this is a workaround that would be fixed better by
-  # improving the logical/physical id handling
-  for disk in disks:
-    lu.cfg.SetDiskID(disk, instance.primary_node)
-
   if not disks_ok:
     lu.cfg.MarkInstanceDisksInactive(instance.uuid)

......

     # First run all grow ops in dry-run mode
     for node_uuid in self.instance.all_nodes:
-      self.cfg.SetDiskID(self.disk, node_uuid)
       result = self.rpc.call_blockdev_grow(node_uuid,
                                            (self.disk, self.instance),
                                            self.delta, True, True,
......

     if wipe_disks:
       # Get disk size from primary node for wiping
-      self.cfg.SetDiskID(self.disk, self.instance.primary_node)
       result = self.rpc.call_blockdev_getdimensions(
                  self.instance.primary_node, ([self.disk], self.instance))
       result.Raise("Failed to retrieve disk size from node '%s'" %
......
     # We know that (as far as we can test) operations across different
     # nodes will succeed, time to run it for real on the backing storage
     for node_uuid in self.instance.all_nodes:
-      self.cfg.SetDiskID(self.disk, node_uuid)
       result = self.rpc.call_blockdev_grow(node_uuid,
                                            (self.disk, self.instance),
                                            self.delta, False, True,
......

     # And now execute it for logical storage, on the primary node
     node_uuid = self.instance.primary_node
-    self.cfg.SetDiskID(self.disk, node_uuid)
     result = self.rpc.call_blockdev_grow(node_uuid, (self.disk, self.instance),
                                          self.delta, False, False,
                                          self.node_es_flags[node_uuid])
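The disk-grow path keeps its three-phase ordering; only the SetDiskID calls disappear. A condensed sketch of the sequence shown in the hunks above, reading the two boolean arguments of call_blockdev_grow as (dryrun, backingstore) based on the surrounding comments; the trailing per-node exclusive-storage flag is only visible in the last hunk, so its use in the first two calls is an assumption, and all result checking is omitted:

    # Condensed sketch of the grow sequence; result handling omitted.
    # Phase 1: dry-run on every node of the instance.
    for node_uuid in self.instance.all_nodes:
      self.rpc.call_blockdev_grow(node_uuid, (self.disk, self.instance),
                                  self.delta, True, True,
                                  self.node_es_flags[node_uuid])

    # Phase 2: grow the backing storage for real on every node.
    for node_uuid in self.instance.all_nodes:
      self.rpc.call_blockdev_grow(node_uuid, (self.disk, self.instance),
                                  self.delta, False, True,
                                  self.node_es_flags[node_uuid])

    # Phase 3: grow the logical storage, on the primary node only.
    node_uuid = self.instance.primary_node
    self.rpc.call_blockdev_grow(node_uuid, (self.disk, self.instance),
                                self.delta, False, False,
                                self.node_es_flags[node_uuid])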
......
   the device(s)) to the ldisk (representing the local storage status).

   """
-  lu.cfg.SetDiskID(dev, node_uuid)
-
   result = True

   if on_primary or dev.AssembleOnSecondary():
......
       for node_uuid in node_uuids:
         self.lu.LogInfo("Checking disk/%d on %s", idx,
                         self.cfg.GetNodeName(node_uuid))
-        self.cfg.SetDiskID(dev, node_uuid)

         result = _BlockdevFind(self, node_uuid, dev, instance)

......
       for node_uuid in node_uuids:
         self.lu.LogInfo("Checking disk/%d on %s", idx,
                         self.cfg.GetNodeName(node_uuid))
-        self.cfg.SetDiskID(dev, node_uuid)

         result = _BlockdevFind(self, node_uuid, dev, self.instance)

......
       self.lu.LogInfo("Adding storage on %s for disk/%d",
                       self.cfg.GetNodeName(node_uuid), idx)

-      self.cfg.SetDiskID(dev, node_uuid)
-
       lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
       names = _GenerateUniqueNames(self.lu, lv_names)

......

   def _CheckDevices(self, node_uuid, iv_names):
     for name, (dev, _, _) in iv_names.iteritems():
-      self.cfg.SetDiskID(dev, node_uuid)
-
       result = _BlockdevFind(self, node_uuid, dev, self.instance)

       msg = result.fail_msg
......
       self.lu.LogInfo("Remove logical volumes for %s", name)

       for lv in old_lvs:
-        self.cfg.SetDiskID(lv, node_uuid)
         msg = self.rpc.call_blockdev_remove(node_uuid, (lv, self.instance)) \
                 .fail_msg
         if msg:
......
       # ok, we created the new LVs, so now we know we have the needed
       # storage; as such, we proceed on the target node to rename
       # old_lv to _old, and new_lv to old_lv; note that we rename LVs
-      # using the assumption that logical_id == physical_id (which in
-      # turn is the unique_id on that node)
+      # using the assumption that logical_id == unique_id on that node

       # FIXME(iustin): use a better name for the replaced LVs
       temp_suffix = int(time.time())
-      ren_fn = lambda d, suff: (d.physical_id[0],
-                                d.physical_id[1] + "_replaced-%s" % suff)
+      ren_fn = lambda d, suff: (d.logical_id[0],
+                                d.logical_id[1] + "_replaced-%s" % suff)

       # Build the rename list based on what LVs exist on the node
       rename_old_to_new = []
......

       # Now we rename the new LVs to the old LVs
       self.lu.LogInfo("Renaming the new LVs on the target node")
-      rename_new_to_old = [(new, old.physical_id)
+      rename_new_to_old = [(new, old.logical_id)
                            for old, new in zip(old_lvs, new_lvs)]
       result = self.rpc.call_blockdev_rename(self.target_node_uuid,
                                              rename_new_to_old)
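These two hunks are the places where physical_id was still used as a data source; from this revision on, the LV rename pairs are built from logical_id alone. A small worked example of what ren_fn now produces, using made-up volume-group and LV names and a fixed timestamp (an LV's logical_id is the (vg_name, lv_name) pair the lambda indexes into):

    # Illustration only: made-up names, fixed timestamp.
    class FakeLV:
      def __init__(self, logical_id):
        self.logical_id = logical_id        # (vg_name, lv_name)

    temp_suffix = 1620000000                # int(time.time()) in the code
    ren_fn = lambda d, suff: (d.logical_id[0],
                              d.logical_id[1] + "_replaced-%s" % suff)

    old_lv = FakeLV(("xenvg", "disk0_data"))
    print(ren_fn(old_lv, temp_suffix))
    # ('xenvg', 'disk0_data_replaced-1620000000')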
......
     # We have new devices, shutdown the drbd on the old secondary
     for idx, dev in enumerate(self.instance.disks):
       self.lu.LogInfo("Shutting down drbd for disk/%d on old node", idx)
-      self.cfg.SetDiskID(dev, self.target_node_uuid)
       msg = self.rpc.call_blockdev_shutdown(self.target_node_uuid,
                                             (dev, self.instance)).fail_msg
       if msg:
......
     self.lu.LogInfo("Updating instance configuration")
     for dev, _, new_logical_id in iv_names.itervalues():
       dev.logical_id = new_logical_id
-      self.cfg.SetDiskID(dev, self.instance.primary_node)

     self.cfg.Update(self.instance, feedback_fn)

