Revision a57e502a
--- a/lib/backend.py
+++ b/lib/backend.py
@@ -2851,7 +2851,7 @@
       result["DISK_%d_BACKEND_TYPE" % idx] = "block"
     elif disk.dev_type in [constants.DT_FILE, constants.DT_SHARED_FILE]:
       result["DISK_%d_BACKEND_TYPE" % idx] = \
-        "file:%s" % disk.physical_id[0]
+        "file:%s" % disk.logical_id[0]
 
   # NICs
   for idx, nic in enumerate(instance.nics):
@@ -3076,7 +3076,7 @@
     config.set(constants.INISECT_INS, "disk%d_ivname" % disk_count,
                ("%s" % disk.iv_name))
     config.set(constants.INISECT_INS, "disk%d_dump" % disk_count,
-               ("%s" % disk.physical_id[1]))
+               ("%s" % disk.logical_id[1]))
     config.set(constants.INISECT_INS, "disk%d_size" % disk_count,
                ("%d" % disk.size))
 
@@ -3159,11 +3159,9 @@
   """Rename a list of block devices.
 
   @type devlist: list of tuples
-  @param devlist: list of tuples of the form (disk,
-      new_logical_id, new_physical_id); disk is an
-      L{objects.Disk} object describing the current disk,
-      and new logical_id/physical_id is the name we
-      rename it to
+  @param devlist: list of tuples of the form (disk, new_unique_id); disk is
+      an L{objects.Disk} object describing the current disk, and new
+      unique_id is the name we rename it to
   @rtype: boolean
   @return: True if all renames succeeded, False otherwise
 
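These hunks replace disk.physical_id with disk.logical_id as the single
naming source on the node. For orientation, the logical_id shapes implied by
this revision (the DRBD six-tuple is unpacked verbatim in the lib/config.py
hunk further down; the concrete values here are invented for illustration):

    lv_id   = ("xenvg", "11a4c25c.disk0_data")             # plain LVM: (vg, lv)
    file_id = ("loop", "/srv/ganeti/file-storage/disk0")   # file: (driver, path)
    drbd_id = ("pnode-uuid", "snode-uuid",                 # drbd8: nodes, port,
               11000, 0, 1, "shared-secret")               # minors, secret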
--- a/lib/client/gnt_instance.py
+++ b/lib/client/gnt_instance.py
@@ -942,10 +942,6 @@
   return data
 
 
-def _FormatListInfo(data):
-  return list(str(i) for i in data)
-
-
 def _FormatBlockDevInfo(idx, top_level, dev, roman):
   """Show block device information.
 
@@ -1043,8 +1039,6 @@
       data.append(("logical_id", l_id[0]))
     else:
       data.extend(l_id)
-  elif dev["physical_id"] is not None:
-    data.append(("physical_id:", _FormatListInfo(dev["physical_id"])))
 
   if dev["pstatus"]:
     data.append(("on primary", helper(dev["dev_type"], dev["pstatus"])))
--- a/lib/cmdlib/backup.py
+++ b/lib/cmdlib/backup.py
@@ -393,11 +393,6 @@
                    " node %s" % (self.instance.name,
                                  self.cfg.GetNodeName(src_node_uuid)))
 
-    # set the disks ID correctly since call_instance_start needs the
-    # correct drbd minor to create the symlinks
-    for disk in self.instance.disks:
-      self.cfg.SetDiskID(disk, src_node_uuid)
-
     activate_disks = not self.instance.disks_active
 
     if activate_disks:
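This hunk shows the pattern this revision repeats across cmdlib: callers no
longer prime each disk with cfg.SetDiskID() before issuing an RPC; the RPC
layer instead receives (disk, instance) pairs and the target node derives its
own addressing from logical_id. A minimal before/after sketch with
simplified, hypothetical signatures (not the real RPC layer):

    # before: mutate disk.physical_id for the target node, then call
    for disk in instance.disks:
      cfg.SetDiskID(disk, node_uuid)            # filled in disk.physical_id
      rpc.call_blockdev_remove(node_uuid, disk)

    # after: pass (disk, instance); no per-node mutation of config objects
    for disk in instance.disks:
      rpc.call_blockdev_remove(node_uuid, (disk, instance))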
--- a/lib/cmdlib/cluster.py
+++ b/lib/cmdlib/cluster.py
@@ -538,8 +538,6 @@
         continue
 
       newl = [(v[2].Copy(), v[0]) for v in dskl]
-      for (dsk, _) in newl:
-        self.cfg.SetDiskID(dsk, node_uuid)
       node_name = self.cfg.GetNodeName(node_uuid)
       result = self.rpc.call_blockdev_getdimensions(node_uuid, newl)
       if result.fail_msg:
@@ -2695,7 +2693,6 @@
       for (inst_uuid, dev) in disks:
         (anno_disk,) = AnnotateDiskParams(instanceinfo[inst_uuid], [dev],
                                           self.cfg)
-        self.cfg.SetDiskID(anno_disk, nuuid)
         dev_inst_only.append((anno_disk, instanceinfo[inst_uuid]))
 
       node_disks_dev_inst_only[nuuid] = dev_inst_only
--- a/lib/cmdlib/common.py
+++ b/lib/cmdlib/common.py
@@ -1036,9 +1036,6 @@
 def FindFaultyInstanceDisks(cfg, rpc_runner, instance, node_uuid, prereq):
   faulty = []
 
-  for dev in instance.disks:
-    cfg.SetDiskID(dev, node_uuid)
-
   result = rpc_runner.call_blockdev_getmirrorstatus(
              node_uuid, (instance.disks, instance))
   result.Raise("Failed to get disk status from node %s" %
--- a/lib/cmdlib/instance.py
+++ b/lib/cmdlib/instance.py
@@ -1272,7 +1272,6 @@
     for t_dsk, a_dsk in zip(tmp_disks, self.disks):
       rename_to.append(t_dsk.logical_id)
       t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
-      self.cfg.SetDiskID(t_dsk, self.pnode.uuid)
     result = self.rpc.call_blockdev_rename(self.pnode.uuid,
                                            zip(tmp_disks, rename_to))
     result.Raise("Failed to rename adoped LVs")
@@ -1337,11 +1336,6 @@
       ReleaseLocks(self, locking.LEVEL_NODE_RES)
 
     if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
-      # we need to set the disks ID to the primary node, since the
-      # preceding code might or might have not done it, depending on
-      # disk template and other options
-      for disk in iobj.disks:
-        self.cfg.SetDiskID(disk, self.pnode.uuid)
       if self.op.mode == constants.INSTANCE_CREATE:
         if not self.op.no_install:
           pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
@@ -1573,7 +1567,6 @@
     info = GetInstanceInfoText(renamed_inst)
     for (idx, disk) in enumerate(renamed_inst.disks):
       for node_uuid in renamed_inst.all_nodes:
-        self.cfg.SetDiskID(disk, node_uuid)
         result = self.rpc.call_blockdev_setinfo(node_uuid,
                                                 (disk, renamed_inst), info)
         result.Warn("Error setting info on node %s for disk %s" %
@@ -3132,8 +3125,6 @@
     except errors.GenericError, e:
       feedback_fn("Initializing of DRBD devices failed;"
                   " renaming back original volumes...")
-      for disk in new_disks:
-        self.cfg.SetDiskID(disk, pnode_uuid)
       rename_back_list = [(n.children[0], o.logical_id)
                           for (n, o) in zip(new_disks, self.instance.disks)]
       result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
@@ -3194,7 +3185,6 @@
 
     feedback_fn("Removing volumes on the secondary node...")
     for disk in old_disks:
-      self.cfg.SetDiskID(disk, snode_uuid)
       result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
       result.Warn("Could not remove block device %s on node %s,"
                   " continuing anyway" %
@@ -3204,7 +3194,6 @@
     feedback_fn("Removing unneeded volumes on the primary node...")
     for idx, disk in enumerate(old_disks):
       meta = disk.children[1]
-      self.cfg.SetDiskID(meta, pnode_uuid)
       result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
       result.Warn("Could not remove metadata for disk %d on node %s,"
                   " continuing anyway" %
@@ -3264,7 +3253,6 @@
     (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
     for node_uuid, disk in anno_disk.ComputeNodeTree(
                              self.instance.primary_node):
-      self.cfg.SetDiskID(disk, node_uuid)
       msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
               .fail_msg
       if msg:
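All call_blockdev_rename call sites in this file now build their pairs from
logical_id alone. An illustrative sketch of one such pair for an adopted LV
(volume-group and LV names are invented; LV logical_ids are (vg, name)
tuples):

    t_dsk.logical_id = ("xenvg", "existing-lv")           # current name
    rename_pairs = [(t_dsk, ("xenvg", "inst1.disk0"))]    # (disk, new_unique_id)
    result = self.rpc.call_blockdev_rename(self.pnode.uuid, rename_pairs)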
--- a/lib/cmdlib/instance_migration.py
+++ b/lib/cmdlib/instance_migration.py
@@ -502,9 +502,6 @@
       self.feedback_fn("* switching node %s to secondary mode" %
                        self.cfg.GetNodeName(node_uuid))
 
-      for dev in self.instance.disks:
-        self.cfg.SetDiskID(dev, node_uuid)
-
       result = self.rpc.call_blockdev_close(node_uuid, self.instance.name,
                                             (self.instance.disks, self.instance))
       result.Raise("Cannot change disk to secondary on node %s" %
--- a/lib/cmdlib/instance_operation.py
+++ b/lib/cmdlib/instance_operation.py
@@ -394,8 +394,6 @@
     if instance_running and \
         self.op.reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                                 constants.INSTANCE_REBOOT_HARD]:
-      for disk in self.instance.disks:
-        self.cfg.SetDiskID(disk, current_node_uuid)
       result = self.rpc.call_instance_reboot(current_node_uuid, self.instance,
                                              self.op.reboot_type,
                                              self.op.shutdown_timeout,
--- a/lib/cmdlib/instance_query.py
+++ b/lib/cmdlib/instance_query.py
@@ -308,7 +308,6 @@
     if self.op.static or not node_uuid:
       return None
 
-    self.cfg.SetDiskID(dev, node_uuid)
     result = self.rpc.call_blockdev_find(node_uuid, (dev, instance))
     if result.offline:
       return None
@@ -372,7 +371,6 @@
       "dev_type": dev.dev_type,
       "logical_id": dev.logical_id,
       "drbd_info": drbd_info,
-      "physical_id": dev.physical_id,
       "pstatus": dev_pstatus,
       "sstatus": dev_sstatus,
       "children": dev_children,
--- a/lib/cmdlib/instance_storage.py
+++ b/lib/cmdlib/instance_storage.py
@@ -79,7 +79,6 @@
   @param excl_stor: Whether exclusive_storage is active for the node
 
   """
-  lu.cfg.SetDiskID(device, node_uuid)
   result = lu.rpc.call_blockdev_create(node_uuid, (device, instance),
                                        device.size, instance.name, force_open,
                                        info, excl_stor)
@@ -87,8 +86,6 @@
                " node %s for instance %s" % (device,
                                              lu.cfg.GetNodeName(node_uuid),
                                              instance.name))
-  if device.physical_id is None:
-    device.physical_id = result.payload
 
 
 def _CreateBlockDevInner(lu, node_uuid, instance, device, force_create,
@@ -198,7 +195,6 @@
 
   """
   for (node_uuid, disk) in disks_created:
-    lu.cfg.SetDiskID(disk, node_uuid)
     result = lu.rpc.call_blockdev_remove(node_uuid, (disk, instance))
     result.Warn("Failed to remove newly-created disk %s on node %s" %
                 (disk, lu.cfg.GetNodeName(node_uuid)), logging.warning)
@@ -1016,9 +1012,6 @@
   disks = [(idx, disk, 0)
            for (idx, disk) in enumerate(instance.disks)]
 
-  for (_, device, _) in disks:
-    lu.cfg.SetDiskID(device, node_uuid)
-
   logging.info("Pausing synchronization of disks of instance '%s'",
                instance.name)
   result = lu.rpc.call_blockdev_pause_resume_sync(node_uuid,
@@ -1150,9 +1143,6 @@
   node_uuid = instance.primary_node
   node_name = lu.cfg.GetNodeName(node_uuid)
 
-  for dev in disks:
-    lu.cfg.SetDiskID(dev, node_uuid)
-
   # TODO: Convert to utils.Retry
 
   retries = 0
@@ -1227,7 +1217,6 @@
 
   for disk in disks:
     for node_uuid, top_disk in disk.ComputeNodeTree(instance.primary_node):
-      lu.cfg.SetDiskID(top_disk, node_uuid)
       result = lu.rpc.call_blockdev_shutdown(node_uuid, (top_disk, instance))
       msg = result.fail_msg
       if msg:
@@ -1298,7 +1287,6 @@
       if ignore_size:
         node_disk = node_disk.Copy()
         node_disk.UnsetSize()
-      lu.cfg.SetDiskID(node_disk, node_uuid)
       result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
                                              instance.name, False, idx)
       msg = result.fail_msg
@@ -1324,7 +1312,6 @@
       if ignore_size:
         node_disk = node_disk.Copy()
         node_disk.UnsetSize()
-      lu.cfg.SetDiskID(node_disk, node_uuid)
       result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
                                              instance.name, True, idx)
       msg = result.fail_msg
@@ -1339,12 +1326,6 @@
       device_info.append((lu.cfg.GetNodeName(instance.primary_node),
                           inst_disk.iv_name, dev_path))
 
-  # leave the disks configured for the primary node
-  # this is a workaround that would be fixed better by
-  # improving the logical/physical id handling
-  for disk in disks:
-    lu.cfg.SetDiskID(disk, instance.primary_node)
-
   if not disks_ok:
     lu.cfg.MarkInstanceDisksInactive(instance.uuid)
 
@@ -1481,7 +1462,6 @@
 
     # First run all grow ops in dry-run mode
     for node_uuid in self.instance.all_nodes:
-      self.cfg.SetDiskID(self.disk, node_uuid)
       result = self.rpc.call_blockdev_grow(node_uuid,
                                            (self.disk, self.instance),
                                            self.delta, True, True,
@@ -1491,7 +1471,6 @@
 
     if wipe_disks:
      # Get disk size from primary node for wiping
-      self.cfg.SetDiskID(self.disk, self.instance.primary_node)
       result = self.rpc.call_blockdev_getdimensions(
                  self.instance.primary_node, ([self.disk], self.instance))
       result.Raise("Failed to retrieve disk size from node '%s'" %
@@ -1515,7 +1494,6 @@
     # We know that (as far as we can test) operations across different
     # nodes will succeed, time to run it for real on the backing storage
     for node_uuid in self.instance.all_nodes:
-      self.cfg.SetDiskID(self.disk, node_uuid)
       result = self.rpc.call_blockdev_grow(node_uuid,
                                            (self.disk, self.instance),
                                            self.delta, False, True,
@@ -1525,7 +1503,6 @@
 
     # And now execute it for logical storage, on the primary node
     node_uuid = self.instance.primary_node
-    self.cfg.SetDiskID(self.disk, node_uuid)
     result = self.rpc.call_blockdev_grow(node_uuid, (self.disk, self.instance),
                                          self.delta, False, False,
                                          self.node_es_flags[node_uuid])
@@ -1798,8 +1775,6 @@
   the device(s)) to the ldisk (representing the local storage status).
 
   """
-  lu.cfg.SetDiskID(dev, node_uuid)
-
   result = True
 
   if on_primary or dev.AssembleOnSecondary():
@@ -1943,7 +1918,6 @@
       for node_uuid in node_uuids:
         self.lu.LogInfo("Checking disk/%d on %s", idx,
                         self.cfg.GetNodeName(node_uuid))
-        self.cfg.SetDiskID(dev, node_uuid)
 
         result = _BlockdevFind(self, node_uuid, dev, instance)
 
@@ -2203,7 +2177,6 @@
       for node_uuid in node_uuids:
         self.lu.LogInfo("Checking disk/%d on %s", idx,
                         self.cfg.GetNodeName(node_uuid))
-        self.cfg.SetDiskID(dev, node_uuid)
 
         result = _BlockdevFind(self, node_uuid, dev, self.instance)
 
@@ -2246,8 +2219,6 @@
       self.lu.LogInfo("Adding storage on %s for disk/%d",
                       self.cfg.GetNodeName(node_uuid), idx)
 
-      self.cfg.SetDiskID(dev, node_uuid)
-
       lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
       names = _GenerateUniqueNames(self.lu, lv_names)
 
@@ -2280,8 +2251,6 @@
 
   def _CheckDevices(self, node_uuid, iv_names):
     for name, (dev, _, _) in iv_names.iteritems():
-      self.cfg.SetDiskID(dev, node_uuid)
-
       result = _BlockdevFind(self, node_uuid, dev, self.instance)
 
       msg = result.fail_msg
@@ -2299,7 +2268,6 @@
       self.lu.LogInfo("Remove logical volumes for %s", name)
 
       for lv in old_lvs:
-        self.cfg.SetDiskID(lv, node_uuid)
         msg = self.rpc.call_blockdev_remove(node_uuid, (lv, self.instance)) \
                 .fail_msg
         if msg:
@@ -2362,13 +2330,12 @@
     # ok, we created the new LVs, so now we know we have the needed
     # storage; as such, we proceed on the target node to rename
     # old_lv to _old, and new_lv to old_lv; note that we rename LVs
-    # using the assumption that logical_id == physical_id (which in
-    # turn is the unique_id on that node)
+    # using the assumption that logical_id == unique_id on that node
 
     # FIXME(iustin): use a better name for the replaced LVs
     temp_suffix = int(time.time())
-    ren_fn = lambda d, suff: (d.physical_id[0],
-                              d.physical_id[1] + "_replaced-%s" % suff)
+    ren_fn = lambda d, suff: (d.logical_id[0],
+                              d.logical_id[1] + "_replaced-%s" % suff)
 
     # Build the rename list based on what LVs exist on the node
     rename_old_to_new = []
@@ -2387,7 +2354,7 @@
 
     # Now we rename the new LVs to the old LVs
     self.lu.LogInfo("Renaming the new LVs on the target node")
-    rename_new_to_old = [(new, old.physical_id)
+    rename_new_to_old = [(new, old.logical_id)
                          for old, new in zip(old_lvs, new_lvs)]
     result = self.rpc.call_blockdev_rename(self.target_node_uuid,
                                            rename_new_to_old)
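The rename helper now reads both tuple components from logical_id. A small
runnable sketch of what ren_fn produces for an LV-backed disk (the stub class
and names are invented for the example):

    class _FakeDisk(object):
      def __init__(self, logical_id):
        self.logical_id = logical_id

    ren_fn = lambda d, suff: (d.logical_id[0],
                              d.logical_id[1] + "_replaced-%s" % suff)

    disk = _FakeDisk(("xenvg", "11a4c25c.disk0_data"))
    assert ren_fn(disk, 1600000000) == \
        ("xenvg", "11a4c25c.disk0_data_replaced-1600000000")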
@@ -2553,7 +2520,6 @@
     # We have new devices, shutdown the drbd on the old secondary
     for idx, dev in enumerate(self.instance.disks):
       self.lu.LogInfo("Shutting down drbd for disk/%d on old node", idx)
-      self.cfg.SetDiskID(dev, self.target_node_uuid)
       msg = self.rpc.call_blockdev_shutdown(self.target_node_uuid,
                                             (dev, self.instance)).fail_msg
       if msg:
@@ -2578,7 +2544,6 @@
     self.lu.LogInfo("Updating instance configuration")
     for dev, _, new_logical_id in iv_names.itervalues():
       dev.logical_id = new_logical_id
-      self.cfg.SetDiskID(dev, self.instance.primary_node)
 
     self.cfg.Update(self.instance, feedback_fn)
 
--- a/lib/cmdlib/instance_utils.py
+++ b/lib/cmdlib/instance_utils.py
@@ -274,7 +274,6 @@
   else:
     edata = device.ComputeNodeTree(instance.primary_node)
   for node_uuid, disk in edata:
-    lu.cfg.SetDiskID(disk, node_uuid)
     result = lu.rpc.call_blockdev_remove(node_uuid, (disk, instance))
     if result.fail_msg:
       lu.LogWarning("Could not remove disk %s on node %s,"
--- a/lib/config.py
+++ b/lib/config.py
@@ -553,15 +553,13 @@
 
     return result
 
-  def _CheckDiskIDs(self, disk, l_ids, p_ids):
+  def _CheckDiskIDs(self, disk, l_ids):
     """Compute duplicate disk IDs
 
     @type disk: L{objects.Disk}
     @param disk: the disk at which to start searching
     @type l_ids: list
     @param l_ids: list of current logical ids
-    @type p_ids: list
-    @param p_ids: list of current physical ids
     @rtype: list
     @return: a list of error messages
 
@@ -572,15 +570,10 @@
       result.append("duplicate logical id %s" % str(disk.logical_id))
     else:
       l_ids.append(disk.logical_id)
-    if disk.physical_id is not None:
-      if disk.physical_id in p_ids:
-        result.append("duplicate physical id %s" % str(disk.physical_id))
-      else:
-        p_ids.append(disk.physical_id)
 
     if disk.children:
       for child in disk.children:
-        result.extend(self._CheckDiskIDs(child, l_ids, p_ids))
+        result.extend(self._CheckDiskIDs(child, l_ids))
     return result
 
   def _UnlockedVerifyConfig(self):
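With physical ids gone, duplicate detection threads a single list of seen
logical ids through the recursion. A sketch of the simplified call pattern,
mirroring the _UnlockedVerifyConfig hunk below (cfg and instance are assumed
to exist):

    seen_lids = []
    errors = []
    for disk in instance.disks:
      errors.extend(cfg._CheckDiskIDs(disk, seen_lids))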
@@ -598,7 +591,6 @@
     data = self._config_data
     cluster = data.cluster
     seen_lids = []
-    seen_pids = []
 
     # global cluster checks
     if not cluster.enabled_hypervisors:
@@ -729,7 +721,7 @@
       for idx, disk in enumerate(instance.disks):
         result.extend(["instance '%s' disk %d error: %s" %
                        (instance.name, idx, msg) for msg in disk.Verify()])
-        result.extend(self._CheckDiskIDs(disk, seen_lids, seen_pids))
+        result.extend(self._CheckDiskIDs(disk, seen_lids))
 
       wrong_names = _CheckInstanceDiskIvNames(instance.disks)
       if wrong_names:
@@ -873,57 +865,6 @@
     """
     return self._UnlockedVerifyConfig()
 
-  def _UnlockedSetDiskID(self, disk, node_uuid):
-    """Convert the unique ID to the ID needed on the target nodes.
-
-    This is used only for drbd, which needs ip/port configuration.
-
-    The routine descends down and updates its children also, because
-    this helps when the only the top device is passed to the remote
-    node.
-
-    This function is for internal use, when the config lock is already held.
-
-    """
-    if disk.children:
-      for child in disk.children:
-        self._UnlockedSetDiskID(child, node_uuid)
-
-    if disk.logical_id is None and disk.physical_id is not None:
-      return
-    if disk.dev_type == constants.DT_DRBD8:
-      pnode, snode, port, pminor, sminor, secret = disk.logical_id
-      if node_uuid not in (pnode, snode):
-        raise errors.ConfigurationError("DRBD device not knowing node %s" %
-                                        node_uuid)
-      pnode_info = self._UnlockedGetNodeInfo(pnode)
-      snode_info = self._UnlockedGetNodeInfo(snode)
-      if pnode_info is None or snode_info is None:
-        raise errors.ConfigurationError("Can't find primary or secondary node"
-                                        " for %s" % str(disk))
-      p_data = (pnode_info.secondary_ip, port)
-      s_data = (snode_info.secondary_ip, port)
-      if pnode == node_uuid:
-        disk.physical_id = p_data + s_data + (pminor, secret)
-      else: # it must be secondary, we tested above
-        disk.physical_id = s_data + p_data + (sminor, secret)
-    else:
-      disk.physical_id = disk.logical_id
-    return
-
-  @locking.ssynchronized(_config_lock)
-  def SetDiskID(self, disk, node_uuid):
-    """Convert the unique ID to the ID needed on the target nodes.
-
-    This is used only for drbd, which needs ip/port configuration.
-
-    The routine descends down and updates its children also, because
-    this helps when the only the top device is passed to the remote
-    node.
-
-    """
-    return self._UnlockedSetDiskID(disk, node_uuid)
-
   @locking.ssynchronized(_config_lock)
   def AddTcpUdpPort(self, port):
     """Adds a new port to the available port pool.
@@ -1586,7 +1527,6 @@
       disk.logical_id = (disk.logical_id[0],
                          utils.PathJoin(file_storage_dir, inst.name,
                                         "disk%s" % idx))
-      disk.physical_id = disk.logical_id
 
     # Force update of ssconf files
     self._config_data.cluster.serial_no += 1
--- a/lib/hypervisor/hv_xen.py
+++ b/lib/hypervisor/hv_xen.py
@@ -296,7 +296,7 @@
       mode = "r"
 
     if cfdev.dev_type in [constants.DT_FILE, constants.DT_SHARED_FILE]:
-      driver = _FILE_DRIVER_MAP[cfdev.physical_id[0]]
+      driver = _FILE_DRIVER_MAP[cfdev.logical_id[0]]
     else:
       driver = "phy"
 
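For file-based disks, logical_id[0] is the file driver, which keys the Xen
block-device driver lookup. A sketch with an illustrative mapping (the real
_FILE_DRIVER_MAP is defined elsewhere in hv_xen.py; the values here are
assumptions, not quoted from it):

    _FILE_DRIVER_MAP = {
      "loop": "file",       # loopback-mounted image file
      "blktap": "tap:aio",  # blktap userspace driver
    }
    driver = _FILE_DRIVER_MAP[cfdev.logical_id[0]]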
--- a/lib/masterd/instance.py
+++ b/lib/masterd/instance.py
@@ -1186,8 +1186,7 @@
         disk_id = tuple(result.payload)
         disk_params = constants.DISK_LD_DEFAULTS[constants.DT_PLAIN].copy()
         new_dev = objects.Disk(dev_type=constants.DT_PLAIN, size=disk.size,
-                               logical_id=disk_id, physical_id=disk_id,
-                               iv_name=disk.iv_name,
+                               logical_id=disk_id, iv_name=disk.iv_name,
                                params=disk_params)
 
       self._snap_disks.append(new_dev)
@@ -1237,7 +1236,7 @@
         continue
 
       path = utils.PathJoin(pathutils.EXPORT_DIR, "%s.new" % instance.name,
-                            dev.physical_id[1])
+                            dev.logical_id[1])
 
       finished_fn = compat.partial(self._TransferFinished, idx)
 
--- a/lib/objects.py
+++ b/lib/objects.py
@@ -506,7 +506,7 @@
 
 class Disk(ConfigObject):
   """Config object representing a block device."""
-  __slots__ = (["name", "dev_type", "logical_id", "physical_id", "children", "iv_name",
+  __slots__ = (["name", "dev_type", "logical_id", "children", "iv_name",
                 "size", "mode", "params", "spindles"] + _UUID +
               # dynamic_params is special. It depends on the node this instance
               # is sent to, and should not be persisted.
@@ -768,8 +768,6 @@
     obj.children = outils.ContainerFromDicts(obj.children, list, Disk)
     if obj.logical_id and isinstance(obj.logical_id, list):
       obj.logical_id = tuple(obj.logical_id)
-    if obj.physical_id and isinstance(obj.physical_id, list):
-      obj.physical_id = tuple(obj.physical_id)
     if obj.dev_type in constants.LDS_DRBD:
       # we need a tuple of length six here
       if len(obj.logical_id) < 6:
@@ -785,22 +783,16 @@
     elif self.dev_type in constants.LDS_DRBD:
       node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
       val = "<DRBD8("
-      if self.physical_id is None:
-        phy = "unconfigured"
-      else:
-        phy = ("configured as %s:%s %s:%s" %
-               (self.physical_id[0], self.physical_id[1],
-                self.physical_id[2], self.physical_id[3]))
 
-      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
-              (node_a, minor_a, node_b, minor_b, port, phy))
+      val += ("hosts=%s/%d-%s/%d, port=%s, " %
+              (node_a, minor_a, node_b, minor_b, port))
       if self.children and self.children.count(None) == 0:
         val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
       else:
         val += "no local storage"
     else:
-      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
-             (self.dev_type, self.logical_id, self.physical_id, self.children))
+      val = ("<Disk(type=%s, logical_id=%s, children=%s" %
+             (self.dev_type, self.logical_id, self.children))
     if self.iv_name is None:
       val += ", not visible"
     else:
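With physical_id out of __slots__, Disk objects are constructed from
logical_id alone; the lib/masterd/instance.py hunk above shows a real call
site. A minimal sketch (size and names are illustrative):

    new_dev = objects.Disk(dev_type=constants.DT_PLAIN, size=1024,
                           logical_id=("xenvg", "snap-disk0"),
                           iv_name="sda", params=disk_params)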
--- a/src/Ganeti/Objects.hs
+++ b/src/Ganeti/Objects.hs
@@ -379,7 +379,6 @@
 -- code currently can't build it.
 data Disk = Disk
   { diskLogicalId :: DiskLogicalId
-  -- , diskPhysicalId :: String
   , diskChildren :: [Disk]
   , diskIvName :: String
   , diskSize :: Int
@@ -392,7 +391,6 @@
 $(buildObjectSerialisation "Disk" $
   [ customField 'decodeDLId 'encodeFullDLId ["dev_type"] $
       simpleField "logical_id" [t| DiskLogicalId |]
-  -- , simpleField "physical_id" [t| String |]
   , defaultField [| [] |] $ simpleField "children" [t| [Disk] |]
   , defaultField [| "" |] $ simpleField "iv_name" [t| String |]
   , simpleField "size" [t| Int |]