Revision bb3011ad
b/lib/cmdlib/backup.py
@@ -257,7 +257,7 @@
 
     # instance disk type verification
     # TODO: Implement export support for file-based disks
-    for disk in self.instance.disks:
+    for disk in self.cfg.GetInstanceDisks(self.instance):
       if disk.dev_type in constants.DTS_FILEBASED:
         raise errors.OpPrereqError("Export not supported for instances with"
                                    " file-based disks", errors.ECODE_INVAL)
b/lib/cmdlib/cluster.py
@@ -612,7 +612,7 @@
       pnode = instance.primary_node
       if pnode not in per_node_disks:
         per_node_disks[pnode] = []
-      for idx, disk in enumerate(instance.disks):
+      for idx, disk in enumerate(self.cfg.GetInstanceDisks(instance)):
         per_node_disks[pnode].append((instance, idx, disk))
 
     assert not (frozenset(per_node_disks.keys()) -
@@ -2376,7 +2376,7 @@
                     " that have exclusive storage set: %s",
                     instance.disk_template,
                     utils.CommaJoin(self.cfg.GetNodeNames(es_nodes)))
-      for (idx, disk) in enumerate(instance.disks):
+      for (idx, disk) in enumerate(self.cfg.GetInstanceDisks(instance)):
         self._ErrorIf(disk.spindles is None,
                       constants.CV_EINSTANCEMISSINGCFGPARAMETER, instance.name,
                       "number of spindles not configured for disk %s while"
@@ -3054,7 +3054,7 @@
                               if instanceinfo[uuid].disk_template == diskless)
       disks = [(inst_uuid, disk)
                for inst_uuid in node_inst_uuids
-               for disk in instanceinfo[inst_uuid].disks]
+               for disk in self.cfg.GetInstanceDisks(instanceinfo[inst_uuid])]
 
       if not disks:
         nodisk_instances.update(uuid for uuid in node_inst_uuids
b/lib/cmdlib/common.py
@@ -581,10 +581,11 @@
   cpu_count = be_full[constants.BE_VCPUS]
   inst_nodes = cfg.GetInstanceNodes(instance)
   es_flags = rpc.GetExclusiveStorageForNodes(cfg, inst_nodes)
+  disks = cfg.GetInstanceDisks(instance)
   if any(es_flags.values()):
     # With exclusive storage use the actual spindles
     try:
-      spindle_use = sum([disk.spindles for disk in instance.disks])
+      spindle_use = sum([disk.spindles for disk in disks])
     except TypeError:
       ret.append("Number of spindles not configured for disks of instance %s"
                  " while exclusive storage is enabled, try running gnt-cluster"
@@ -593,8 +594,8 @@
       spindle_use = None
   else:
     spindle_use = be_full[constants.BE_SPINDLE_USE]
-  disk_count = len(instance.disks)
-  disk_sizes = [disk.size for disk in instance.disks]
+  disk_count = len(disks)
+  disk_sizes = [disk.size for disk in disks]
   nic_count = len(instance.nics)
   disk_template = instance.disk_template
 
@@ -1050,8 +1051,9 @@
 def FindFaultyInstanceDisks(cfg, rpc_runner, instance, node_uuid, prereq):
   faulty = []
 
+  disks = cfg.GetInstanceDisks(instance)
   result = rpc_runner.call_blockdev_getmirrorstatus(
-    node_uuid, (instance.disks, instance))
+    node_uuid, (disks, instance))
   result.Raise("Failed to get disk status from node %s" %
                cfg.GetNodeName(node_uuid),
                prereq=prereq, ecode=errors.ECODE_ENVIRON)
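
Note: the try/except around the sum works because Disk.spindles is None when
not configured, and summing a list containing None raises TypeError. A minimal
illustration:

  spindles = [2, None, 1]          # one disk without spindles configured
  try:
    spindle_use = sum(spindles)
  except TypeError:
    spindle_use = None             # reported as "not configured" above
  assert spindle_use is None
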
b/lib/cmdlib/group.py
@@ -915,7 +915,7 @@
         node_to_inst.setdefault(node_uuid, []).append(inst)
 
     for (node_uuid, insts) in node_to_inst.items():
-      node_disks = [(inst.disks, inst) for inst in insts]
+      node_disks = [(self.cfg.GetInstanceDisks(inst), inst) for inst in insts]
       node_res = self.rpc.call_drbd_needs_activation(node_uuid, node_disks)
       msg = node_res.fail_msg
       if msg:
@@ -926,7 +926,8 @@
 
       faulty_disk_uuids = set(node_res.payload)
       for inst in self.instances.values():
-        inst_disk_uuids = set([disk.uuid for disk in inst.disks])
+        disks = self.cfg.GetInstanceDisks(inst)
+        inst_disk_uuids = set([disk.uuid for disk in disks])
         if inst_disk_uuids.intersection(faulty_disk_uuids):
           offline_disk_instance_names.add(inst.name)
 
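
Note: matching the faulty-disk payload back to instances is a plain set
intersection on disk UUIDs; a toy restatement of the check above:

  faulty_disk_uuids = set(["u2", "u5"])     # from call_drbd_needs_activation
  inst_disk_uuids = set(["u1", "u2"])       # from the resolved instance disks
  needs_activation = bool(inst_disk_uuids.intersection(faulty_disk_uuids))
  assert needs_activation                   # "u2" is shared
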
b/lib/cmdlib/instance.py
@@ -1365,7 +1365,7 @@
                             uuid=instance_uuid,
                             os=self.op.os_type,
                             primary_node=self.pnode.uuid,
-                            nics=self.nics, disks=disks,
+                            nics=self.nics, disks=[],
                             disk_template=self.op.disk_template,
                             disks_active=False,
                             admin_state=constants.ADMINST_DOWN,
@@ -1396,15 +1396,17 @@
     else:
       feedback_fn("* creating instance disks...")
       try:
-        CreateDisks(self, iobj)
+        CreateDisks(self, iobj, disks=disks)
       except errors.OpExecError:
         self.LogWarning("Device creation failed")
         self.cfg.ReleaseDRBDMinors(self.op.instance_name)
         raise
 
     feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
-
     self.cfg.AddInstance(iobj, self.proc.GetECId())
+    feedback_fn("adding disks to cluster config")
+    for disk in disks:
+      self.cfg.AddInstDisk(iobj, disk)
 
     # Declare that we don't want to remove the instance lock anymore, as we've
     # added the instance to the config
@@ -1441,6 +1443,9 @@
 
     if disk_abort:
       RemoveDisks(self, iobj)
+      # Copy the 'iobj.disks' list, because it changes inside 'RemoveInstDisk'
+      for disk_uuid in list(iobj.disks):
+        self.cfg.RemoveInstDisk(disk_uuid)
       self.cfg.RemoveInstance(iobj.uuid)
       # Make sure the instance lock gets removed
       self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
@@ -1454,6 +1459,7 @@
     ReleaseLocks(self, locking.LEVEL_NODE_RES)
 
     if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
+      disks = self.cfg.GetInstanceDisks(iobj)
       if self.op.mode == constants.INSTANCE_CREATE:
         if not self.op.no_install:
           pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
@@ -1461,8 +1467,8 @@
           if pause_sync:
             feedback_fn("* pausing disk sync to install instance OS")
             result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
-                                                              (iobj.disks,
-                                                               iobj), True)
+                                                              (disks, iobj),
+                                                              True)
             for idx, success in enumerate(result.payload):
               if not success:
                 logging.warn("pause-sync of instance %s for disk %d failed",
@@ -1478,8 +1484,8 @@
       if pause_sync:
         feedback_fn("* resuming disk sync")
         result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
-                                                          (iobj.disks,
-                                                           iobj), False)
+                                                          (disks, iobj),
+                                                          False)
         for idx, success in enumerate(result.payload):
           if not success:
             logging.warn("resume-sync of instance %s for disk %d failed",
@@ -1503,7 +1509,7 @@
             dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                                constants.IEIO_FILE, (image, ),
                                                constants.IEIO_SCRIPT,
-                                               ((iobj.disks[idx], iobj), idx),
+                                               ((disks[idx], iobj), idx),
                                                None)
             transfers.append(dt)
 
@@ -1659,8 +1665,8 @@
     if (self.instance.disk_template in (constants.DT_FILE,
                                         constants.DT_SHARED_FILE) and
         self.op.new_name != self.instance.name):
-      old_file_storage_dir = os.path.dirname(
-        self.instance.disks[0].logical_id[1])
+      disks = self.cfg.GetInstanceDisks(self.instance)
+      old_file_storage_dir = os.path.dirname(disks[0].logical_id[1])
       rename_file_storage = True
 
     self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
@@ -1673,10 +1679,10 @@
 
     # re-read the instance from the configuration after rename
     renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
+    disks = self.cfg.GetInstanceDisks(renamed_inst)
 
     if rename_file_storage:
-      new_file_storage_dir = os.path.dirname(
-        renamed_inst.disks[0].logical_id[1])
+      new_file_storage_dir = os.path.dirname(disks[0].logical_id[1])
       result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
                                                      old_file_storage_dir,
                                                      new_file_storage_dir)
@@ -1688,7 +1694,7 @@
       StartInstanceDisks(self, renamed_inst, None)
       # update info on disks
       info = GetInstanceInfoText(renamed_inst)
-      for (idx, disk) in enumerate(renamed_inst.disks):
+      for (idx, disk) in enumerate(disks):
         for node_uuid in self.cfg.GetInstanceNodes(renamed_inst):
           result = self.rpc.call_blockdev_setinfo(node_uuid,
                                                   (disk, renamed_inst), info)
@@ -1863,7 +1869,8 @@
     cluster = self.cfg.GetClusterInfo()
     bep = cluster.FillBE(self.instance)
 
-    for idx, dsk in enumerate(self.instance.disks):
+    disks = self.cfg.GetInstanceDisks(self.instance)
+    for idx, dsk in enumerate(disks):
       if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
                               constants.DT_SHARED_FILE, constants.DT_GLUSTER):
         raise errors.OpPrereqError("Instance disk %d has a complex layout,"
@@ -1930,7 +1937,8 @@
     errs = []
     transfers = []
     # activate, get path, create transfer jobs
-    for idx, disk in enumerate(self.instance.disks):
+    disks = self.cfg.GetInstanceDisks(self.instance)
+    for idx, disk in enumerate(disks):
      # FIXME: pass debug option from opcode to backend
       dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                          constants.IEIO_RAW_DISK,
@@ -1939,6 +1947,7 @@
                                          (disk, self.instance),
                                          None)
       transfers.append(dt)
+      self.cfg.Update(disk, feedback_fn)
 
     import_result = \
       masterd.instance.TransferInstanceData(self, feedback_fn,
@@ -2361,16 +2370,6 @@
     chgdesc.extend(changes)
 
 
-def _UpdateIvNames(base_index, disks):
-  """Updates the C{iv_name} attribute of disks.
-
-  @type disks: list of L{objects.Disk}
-
-  """
-  for (idx, disk) in enumerate(disks):
-    disk.iv_name = "disk/%s" % (base_index + idx, )
-
-
 class LUInstanceSetParams(LogicalUnit):
   """Modifies an instances's parameters.
 
@@ -2801,7 +2800,7 @@
     assert self.instance.disk_template == constants.DT_PLAIN
     disks = [{constants.IDISK_SIZE: d.size,
               constants.IDISK_VG: d.logical_id[0]}
-             for d in self.instance.disks]
+             for d in self.cfg.GetInstanceDisks(self.instance)]
     required = ComputeDiskSizePerVG(self.op.disk_template, disks)
     CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)
 
@@ -2898,7 +2897,8 @@
       disk.name = params.get(constants.IDISK_NAME, None)
 
     # Verify disk changes (operating on a copy)
-    disks = copy.deepcopy(self.instance.disks)
+    inst_disks = self.cfg.GetInstanceDisks(self.instance)
+    disks = copy.deepcopy(inst_disks)
     _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                         _PrepareDiskMod, None)
     utils.ValidateDeviceNames("disk", disks)
@@ -2906,7 +2906,7 @@
       raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                  " more" % constants.MAX_DISKS,
                                  errors.ECODE_STATE)
-    disk_sizes = [disk.size for disk in self.instance.disks]
+    disk_sizes = [disk.size for disk in inst_disks]
     disk_sizes.extend(params["size"] for (op, idx, params, private) in
                       self.diskmod if op == constants.DDM_ADD)
     ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
@@ -3331,11 +3331,12 @@
 
     assert self.instance.disk_template == constants.DT_PLAIN
 
+    old_disks = self.cfg.GetInstanceDisks(self.instance)
     # create a fake disk info for _GenerateDiskTemplate
     disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                   constants.IDISK_VG: d.logical_id[0],
                   constants.IDISK_NAME: d.name}
-                 for d in self.instance.disks]
+                 for d in old_disks]
     new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                      self.instance.uuid, pnode_uuid,
                                      [snode_uuid], disk_info, None, None, 0,
@@ -3357,7 +3358,7 @@
     # old ones
     feedback_fn("Renaming original volumes...")
     rename_list = [(o, n.children[0].logical_id)
-                   for (o, n) in zip(self.instance.disks, new_disks)]
+                   for (o, n) in zip(old_disks, new_disks)]
     result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
     result.Raise("Failed to rename original LVs")
 
@@ -3374,14 +3375,17 @@
       feedback_fn("Initializing of DRBD devices failed;"
                   " renaming back original volumes...")
       rename_back_list = [(n.children[0], o.logical_id)
-                          for (n, o) in zip(new_disks, self.instance.disks)]
+                          for (n, o) in zip(new_disks, old_disks)]
       result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
       result.Raise("Failed to rename LVs back after error %s" % str(e))
       raise
 
     # at this point, the instance has been modified
     self.instance.disk_template = constants.DT_DRBD8
-    self.instance.disks = new_disks
+    for old_disk in old_disks:
+      self.cfg.RemoveInstDisk(old_disk.uuid)
+    for (idx, new_disk) in enumerate(new_disks):
+      self.cfg.AddInstDisk(self.instance, new_disk, idx=idx)
     self.cfg.Update(self.instance, feedback_fn)
 
     # Release node locks while waiting for sync
@@ -3408,8 +3412,9 @@
     snode_uuid = secondary_nodes[0]
     feedback_fn("Converting template to plain")
 
-    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
-    new_disks = [d.children[0] for d in self.instance.disks]
+    disks = self.cfg.GetInstanceDisks(self.instance)
+    old_disks = AnnotateDiskParams(self.instance, disks, self.cfg)
+    new_disks = [d.children[0] for d in disks]
 
     # copy over size, mode and name
     for parent, child in zip(old_disks, new_disks):
@@ -3424,9 +3429,11 @@
       self.cfg.AddTcpUdpPort(tcp_port)
 
     # update instance structure
-    self.instance.disks = new_disks
+    for old_disk in old_disks:
+      self.cfg.RemoveInstDisk(old_disk.uuid)
     self.instance.disk_template = constants.DT_PLAIN
-    _UpdateIvNames(0, self.instance.disks)
+    for (idx, new_disk) in enumerate(new_disks):
+      self.cfg.AddInstDisk(self.instance, new_disk, idx=idx)
     self.cfg.Update(self.instance, feedback_fn)
 
     # Release locks in case removing disks takes a while
@@ -3471,7 +3478,8 @@
     """
     # add a new disk
     if self.instance.disk_template in constants.DTS_FILEBASED:
-      (file_driver, file_path) = self.instance.disks[0].logical_id
+      disks = self.cfg.GetInstanceDisks(self.instance)
+      (file_driver, file_path) = disks[0].logical_id
      file_path = os.path.dirname(file_path)
     else:
       file_driver = file_path = None
@@ -3484,6 +3492,7 @@
                             file_driver, idx, self.Log, self.diskparams)[0]
 
     new_disks = CreateDisks(self, self.instance, disks=[disk])
+    self.cfg.AddInstDisk(self.instance, disk, idx)
 
     if self.cluster.prealloc_wipe_disks:
       # Wipe new disk
@@ -3577,6 +3586,9 @@
     if root.dev_type in constants.DTS_DRBD:
       self.cfg.AddTcpUdpPort(root.logical_id[2])
 
+    # Remove disk from config
+    self.cfg.RemoveInstDisk(root.uuid)
+
     return hotmsg
 
   def _CreateNewNic(self, idx, params, private):
@@ -3677,10 +3689,10 @@
     result.append(("runtime_memory", self.op.runtime_mem))
 
     # Apply disk changes
-    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
+    inst_disks = self.cfg.GetInstanceDisks(self.instance)
+    _ApplyContainerMods("disk", inst_disks, result, self.diskmod,
                         self._CreateNewDisk, self._ModifyDisk,
                         self._RemoveDisk, post_add_fn=self._PostAddDisk)
-    _UpdateIvNames(0, self.instance.disks)
 
     if self.op.disk_template:
       if __debug__:
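
Note: both removal loops above iterate over a copy (list(...)) because
RemoveInstDisk drops the UUID from instance.disks while the loop runs;
iterating the live list would skip entries. A minimal illustration:

  disk_uuids = ["d1", "d2", "d3"]

  def remove_inst_disk(uuid):
    disk_uuids.remove(uuid)          # mutates the list being looped over

  for uuid in list(disk_uuids):      # snapshot, so iteration stays stable
    remove_inst_disk(uuid)
  assert disk_uuids == []
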
b/lib/cmdlib/instance_migration.py
@@ -485,11 +485,11 @@
     """
     self.feedback_fn("* wait until resync is done")
     all_done = False
+    disks = self.cfg.GetInstanceDisks(self.instance)
     while not all_done:
       all_done = True
       result = self.rpc.call_drbd_wait_sync(self.all_node_uuids,
-                                            (self.instance.disks,
-                                             self.instance))
+                                            (disks, self.instance))
       min_percent = 100
       for node_uuid, nres in result.items():
         nres.Raise("Cannot resync disks on node %s" %
@@ -510,8 +510,9 @@
       self.feedback_fn("* switching node %s to secondary mode" %
                        self.cfg.GetNodeName(node_uuid))
 
+      disks = self.cfg.GetInstanceDisks(self.instance)
       result = self.rpc.call_blockdev_close(node_uuid, self.instance.name,
-                                            (self.instance.disks, self.instance))
+                                            (disks, self.instance))
       result.Raise("Cannot change disk to secondary on node %s" %
                    self.cfg.GetNodeName(node_uuid))
 
@@ -520,8 +521,9 @@
 
     """
     self.feedback_fn("* changing into standalone mode")
+    disks = self.cfg.GetInstanceDisks(self.instance)
     result = self.rpc.call_drbd_disconnect_net(
-      self.all_node_uuids, (self.instance.disks, self.instance))
+      self.all_node_uuids, (disks, self.instance))
     for node_uuid, nres in result.items():
       nres.Raise("Cannot disconnect disks node %s" %
                  self.cfg.GetNodeName(node_uuid))
@@ -535,8 +537,9 @@
     else:
       msg = "single-master"
     self.feedback_fn("* changing disks into %s mode" % msg)
+    disks = self.cfg.GetInstanceDisks(self.instance)
     result = self.rpc.call_drbd_attach_net(self.all_node_uuids,
-                                           (self.instance.disks, self.instance),
+                                           (disks, self.instance),
                                            self.instance.name, multimaster)
     for node_uuid, nres in result.items():
       nres.Raise("Cannot change disks config on node %s" %
@@ -682,7 +685,7 @@
                                  (src_version, dst_version))
 
     self.feedback_fn("* checking disk consistency between source and target")
-    for (idx, dev) in enumerate(self.instance.disks):
+    for (idx, dev) in enumerate(self.cfg.GetInstanceDisks(self.instance)):
       if not CheckDiskConsistency(self.lu, self.instance, dev,
                                   self.target_node_uuid,
                                   False):
@@ -819,7 +822,8 @@
     # If the instance's disk template is `rbd' or `ext' and there was a
     # successful migration, unmap the device from the source node.
     if self.instance.disk_template in (constants.DT_RBD, constants.DT_EXT):
-      disks = ExpandCheckDisks(self.instance, self.instance.disks)
+      inst_disks = self.cfg.GetInstanceDisks(self.instance)
+      disks = ExpandCheckDisks(inst_disks, inst_disks)
       self.feedback_fn("* unmapping instance's disks from %s" %
                        self.cfg.GetNodeName(self.source_node_uuid))
       for disk in disks:
@@ -850,7 +854,7 @@
 
     if self.instance.disks_active:
       self.feedback_fn("* checking disk consistency between source and target")
-      for (idx, dev) in enumerate(self.instance.disks):
+      for (idx, dev) in enumerate(self.cfg.GetInstanceDisks(self.instance)):
         # for drbd, these are drbd over lvm
         if not CheckDiskConsistency(self.lu, self.instance, dev,
                                     self.target_node_uuid, False):
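
Note: every RPC in this file now follows the same resolve-then-call shape. A
hypothetical helper (not part of the commit) capturing that pattern:

  def _CallWithDisks(rpc_fn, cfg, node_uuids, instance, *args):
    """Resolve the instance's disks, then issue the RPC with (disks, inst)."""
    disks = cfg.GetInstanceDisks(instance)
    return rpc_fn(node_uuids, (disks, instance), *args)
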
b/lib/cmdlib/instance_query.py
@@ -243,7 +243,7 @@
 
     disks = map(compat.partial(self._ComputeDiskStatus, instance,
                                node_uuid2name_fn),
-                instance.disks)
+                self.cfg.GetInstanceDisks(instance))
 
     secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance)
     snodes_group_uuids = [nodes[snode_uuid].group
b/lib/cmdlib/instance_storage.py
@@ -224,6 +224,7 @@
 
   """
   info = GetInstanceInfoText(instance)
+  inst_disks = lu.cfg.GetInstanceDisks(instance)
   if target_node_uuid is None:
     pnode_uuid = instance.primary_node
     all_node_uuids = lu.cfg.GetInstanceNodes(instance, disks=disks)
@@ -232,12 +233,15 @@
     all_node_uuids = [pnode_uuid]
 
   if disks is None:
-    disks = instance.disks
+    disks = inst_disks
 
   CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), instance.disk_template)
 
   if instance.disk_template in constants.DTS_FILEBASED:
-    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
+    if inst_disks:
+      file_storage_dir = os.path.dirname(inst_disks[0].logical_id[1])
+    else:
+      file_storage_dir = os.path.dirname(disks[0].logical_id[1])
     result = lu.rpc.call_file_storage_dir_create(pnode_uuid, file_storage_dir)
 
     result.Raise("Failed to create directory '%s' on"
@@ -594,7 +598,7 @@
               constants.IDISK_SIZE: d.size,
               constants.IDISK_MODE: d.mode,
               constants.IDISK_SPINDLES: d.spindles,
-              } for d in self.instance.disks]
+              } for d in self.cfg.GetInstanceDisks(self.instance)]
     req = iallocator.IAReqInstanceAlloc(name=self.op.instance_name,
                                         disk_template=disk_template,
                                         tags=list(self.instance.GetTags()),
@@ -808,7 +812,8 @@
     to_skip = []
     mods = [] # keeps track of needed changes
 
-    for idx, disk in enumerate(self.instance.disks):
+    inst_disks = self.cfg.GetInstanceDisks(self.instance)
+    for idx, disk in enumerate(inst_disks):
       try:
         changes = self.disks[idx]
       except KeyError:
@@ -836,7 +841,7 @@
     # now that we have passed all asserts above, we can apply the mods
     # in a single run (to avoid partial changes)
     for idx, new_id, changes in mods:
-      disk = self.instance.disks[idx]
+      disk = inst_disks[idx]
       if new_id is not None:
         assert disk.dev_type == constants.DT_DRBD8
         disk.logical_id = new_id
@@ -844,6 +849,7 @@
        disk.Update(size=changes.get(constants.IDISK_SIZE, None),
                    mode=changes.get(constants.IDISK_MODE, None),
                    spindles=changes.get(constants.IDISK_SPINDLES, None))
+      self.cfg.Update(disk, feedback_fn)
 
     # change primary node, if needed
     if self.op.node_uuids:
@@ -862,8 +868,9 @@
 
     # TODO: Release node locks before wiping, or explain why it's not possible
     if self.cfg.GetClusterInfo().prealloc_wipe_disks:
+      inst_disks = self.cfg.GetInstanceDisks(self.instance)
      wipedisks = [(idx, disk, 0)
-                   for (idx, disk) in enumerate(self.instance.disks)
+                   for (idx, disk) in enumerate(inst_disks)
                    if idx not in to_skip]
       WipeOrCleanupDisks(self, self.instance, disks=wipedisks,
                          cleanup=new_disks)
@@ -1020,8 +1027,9 @@
   node_name = lu.cfg.GetNodeName(node_uuid)
 
   if disks is None:
+    inst_disks = lu.cfg.GetInstanceDisks(instance)
     disks = [(idx, disk, 0)
-             for (idx, disk) in enumerate(instance.disks)]
+             for (idx, disk) in enumerate(inst_disks)]
 
   logging.info("Pausing synchronization of disks of instance '%s'",
                instance.name)
@@ -1120,7 +1128,7 @@
     raise
 
 
-def ExpandCheckDisks(instance, disks):
+def ExpandCheckDisks(instance_disks, disks):
   """Return the instance disks selected by the disks list
 
   @type disks: list of L{objects.Disk} or None
@@ -1130,12 +1138,12 @@
 
   """
   if disks is None:
-    return instance.disks
+    return instance_disks
   else:
-    if not set(disks).issubset(instance.disks):
+    if not set(disks).issubset(instance_disks):
       raise errors.ProgrammerError("Can only act on disks belonging to the"
                                    " target instance: expected a subset of %r,"
-                                   " got %r" % (instance.disks, disks))
+                                   " got %r" % (instance_disks, disks))
     return disks
 
 
@@ -1143,10 +1151,11 @@
   """Sleep and poll for an instance's disk to sync.
 
   """
-  if not instance.disks or disks is not None and not disks:
+  inst_disks = lu.cfg.GetInstanceDisks(instance)
+  if not inst_disks or disks is not None and not disks:
     return True
 
-  disks = ExpandCheckDisks(instance, disks)
+  disks = ExpandCheckDisks(inst_disks, disks)
 
   if not oneshot:
     lu.LogInfo("Waiting for instance %s to sync disks", instance.name)
@@ -1227,7 +1236,8 @@
   if disks is None:
     # only mark instance disks as inactive if all disks are affected
     lu.cfg.MarkInstanceDisksInactive(instance.uuid)
-  disks = ExpandCheckDisks(instance, disks)
+  inst_disks = lu.cfg.GetInstanceDisks(instance)
+  disks = ExpandCheckDisks(inst_disks, disks)
 
   for disk in disks:
     for node_uuid, top_disk in disk.ComputeNodeTree(instance.primary_node):
@@ -1284,7 +1294,8 @@
     # only mark instance disks as active if all disks are affected
     lu.cfg.MarkInstanceDisksActive(instance.uuid)
 
-  disks = ExpandCheckDisks(instance, disks)
+  inst_disks = lu.cfg.GetInstanceDisks(instance)
+  disks = ExpandCheckDisks(inst_disks, disks)
 
   # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
@@ -1426,7 +1437,7 @@
       raise errors.OpPrereqError("Instance's disk layout does not support"
                                  " growing", errors.ECODE_INVAL)
 
-    self.disk = self.instance.FindDisk(self.op.disk)
+    self.disk = self.cfg.GetDiskInfo(self.instance.FindDisk(self.op.disk))
 
     if self.op.absolute:
       self.target = self.op.amount
@@ -1539,7 +1550,8 @@
     assert wipe_disks ^ (old_disk_size is None)
 
     if wipe_disks:
-      assert self.instance.disks[self.op.disk] == self.disk
+      inst_disks = self.cfg.GetInstanceDisks(self.instance)
+      assert inst_disks[self.op.disk] == self.disk
 
      # Wipe newly added disk space
       WipeDisks(self, self.instance,
@@ -1933,7 +1945,7 @@
     """
     node_uuids = self.cfg.GetInstanceNodes(instance)
 
-    for idx, dev in enumerate(instance.disks):
+    for idx, dev in enumerate(self.cfg.GetInstanceDisks(instance)):
       for node_uuid in node_uuids:
         self.lu.LogInfo("Checking disk/%d on %s", idx,
                         self.cfg.GetNodeName(node_uuid))
@@ -1961,7 +1973,7 @@
       raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
                                  " instances", errors.ECODE_INVAL)
 
-    secondary_nodes = self.cfg.GetInstanceSeconaryNodes(self.instance)
+    secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance)
     if len(secondary_nodes) != 1:
       raise errors.OpPrereqError("The instance has a strange layout,"
                                  " expected one secondary but found %d" %
2190 | 2202 |
|
2191 | 2203 |
def _CheckDisksExistence(self, node_uuids): |
2192 | 2204 |
# Check disk existence |
2193 |
for idx, dev in enumerate(self.instance.disks):
|
|
2205 |
for idx, dev in enumerate(self.cfg.GetInstnaceDisks(self.instance)):
|
|
2194 | 2206 |
if idx not in self.disks: |
2195 | 2207 |
continue |
2196 | 2208 |
|
@@ -2215,7 +2227,7 @@
                                  extra_hint))
 
   def _CheckDisksConsistency(self, node_uuid, on_primary, ldisk):
-    for idx, dev in enumerate(self.instance.disks):
+    for idx, dev in enumerate(self.cfg.GetInstanceDisks(self.instance)):
       if idx not in self.disks:
         continue
 
@@ -2238,7 +2250,8 @@
     """
     iv_names = {}
 
-    disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
+    inst_disks = self.cfg.GetInstanceDisks(self.instance)
+    disks = AnnotateDiskParams(self.instance, inst_disks, self.cfg)
     for idx, dev in enumerate(disks):
       if idx not in self.disks:
         continue
@@ -2481,7 +2494,8 @@
 
     # Step: create new storage
     self.lu.LogStep(3, steps_total, "Allocate new storage")
-    disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
+    inst_disks = self.cfg.GetInstanceDisks(self.instance)
+    disks = AnnotateDiskParams(self.instance, inst_disks, self.cfg)
     excl_stor = IsExclusiveStorageEnabledNodeUuid(self.lu.cfg,
                                                   self.new_node_uuid)
     for idx, dev in enumerate(disks):
@@ -2501,12 +2515,12 @@
     # error and the success paths
     self.lu.LogStep(4, steps_total, "Changing drbd configuration")
     minors = self.cfg.AllocateDRBDMinor([self.new_node_uuid
-                                         for _ in self.instance.disks],
+                                         for _ in inst_disks],
                                         self.instance.uuid)
     logging.debug("Allocated minors %r", minors)
 
     iv_names = {}
-    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
+    for idx, (dev, new_minor) in enumerate(zip(inst_disks, minors)):
       self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
                       (self.cfg.GetNodeName(self.new_node_uuid), idx))
       # create new devices on new_node; note that we create two IDs:
@@ -2545,7 +2559,7 @@
       raise
 
     # We have new devices, shutdown the drbd on the old secondary
-    for idx, dev in enumerate(self.instance.disks):
+    for idx, dev in enumerate(inst_disks):
       self.lu.LogInfo("Shutting down drbd for disk/%d on old node", idx)
       msg = self.rpc.call_blockdev_shutdown(self.target_node_uuid,
                                             (dev, self.instance)).fail_msg
@@ -2557,7 +2571,7 @@
 
     self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
     result = self.rpc.call_drbd_disconnect_net(
-      [pnode], (self.instance.disks, self.instance))[pnode]
+      [pnode], (inst_disks, self.instance))[pnode]
 
     msg = result.fail_msg
     if msg:
@@ -2571,6 +2585,7 @@
     self.lu.LogInfo("Updating instance configuration")
     for dev, _, new_logical_id in iv_names.itervalues():
       dev.logical_id = new_logical_id
+      self.cfg.Update(dev, feedback_fn)
 
     self.cfg.Update(self.instance, feedback_fn)
 
@@ -2580,9 +2595,10 @@
     # and now perform the drbd attach
     self.lu.LogInfo("Attaching primary drbds to new secondary"
                     " (standalone => connected)")
+    inst_disks = self.cfg.GetInstanceDisks(self.instance)
     result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
                                             self.new_node_uuid],
-                                           (self.instance.disks, self.instance),
+                                           (inst_disks, self.instance),
                                            self.instance.name,
                                            False)
     for to_node, to_result in result.items():
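
Note: ExpandCheckDisks now takes the already-resolved disk list instead of the
instance object. A simplified stand-in showing its contract:

  def expand_check_disks(instance_disks, disks):
    """Toy version of ExpandCheckDisks; ValueError for ProgrammerError."""
    if disks is None:
      return instance_disks                  # None means "all disks"
    if not set(disks).issubset(instance_disks):
      raise ValueError("disks must belong to the target instance")
    return disks

  all_disks = ["disk-a", "disk-b"]           # strings stand in for objects.Disk
  assert expand_check_disks(all_disks, None) == all_disks
  assert expand_check_disks(all_disks, ["disk-b"]) == ["disk-b"]
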
b/lib/cmdlib/instance_utils.py
@@ -166,6 +166,7 @@
   bep = cluster.FillBE(instance)
   hvp = cluster.FillHV(instance)
   secondary_nodes = lu.cfg.GetInstanceSecondaryNodes(instance)
+  inst_disks = lu.cfg.GetInstanceDisks(instance)
   args = {
     "name": instance.name,
     "primary_node_name": lu.cfg.GetNodeName(instance.primary_node),
@@ -178,7 +179,7 @@
     "nics": NICListToTuple(lu, instance.nics),
     "disk_template": instance.disk_template,
     "disks": [(disk.name, disk.uuid, disk.size, disk.mode)
-              for disk in instance.disks],
+              for disk in inst_disks],
     "bep": bep,
     "hvp": hvp,
     "hypervisor_name": instance.hypervisor,
@@ -235,8 +236,12 @@
       raise errors.OpExecError("Can't remove instance's disks")
     feedback_fn("Warning: can't remove instance's disks")
 
-  logging.info("Removing instance %s out of cluster config", instance.name)
+  logging.info("Removing instance's disks")
+  # Copy the 'instance.disks' list, because it changes inside 'RemoveInstDisk'
+  for disk in list(instance.disks):
+    lu.cfg.RemoveInstDisk(disk)
 
+  logging.info("Removing instance %s out of cluster config", instance.name)
   lu.cfg.RemoveInstance(instance.uuid)
 
   assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
@@ -268,7 +273,8 @@
 
   all_result = True
   ports_to_release = set()
-  anno_disks = AnnotateDiskParams(instance, instance.disks, lu.cfg)
+  inst_disks = lu.cfg.GetInstanceDisks(instance)
+  anno_disks = AnnotateDiskParams(instance, inst_disks, lu.cfg)
   for (idx, device) in enumerate(anno_disks):
     if target_node_uuid:
       edata = [(target_node_uuid, device)]
@@ -294,8 +300,8 @@
   CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), instance.disk_template)
 
   if instance.disk_template in [constants.DT_FILE, constants.DT_SHARED_FILE]:
-    if len(instance.disks) > 0:
-      file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
+    if len(inst_disks) > 0:
+      file_storage_dir = os.path.dirname(inst_disks[0].logical_id[1])
     else:
      if instance.disk_template == constants.DT_SHARED_FILE:
        file_storage_dir = utils.PathJoin(lu.cfg.GetSharedFileStorageDir(),
b/lib/config.py
@@ -347,6 +347,7 @@
       raise errors.ReservationError("Disk %s already attached to instance %s"
                                     % (disk.uuid, instance.name))
 
+    # Update disk
     disk.instance = instance.uuid
     disk.serial_no += 1
     disk.mtime = time.time()
@@ -463,6 +464,5 @@
     """
     return self._UnlockedGetDiskInfo(disk_uuid)
 
-  # pylint: disable=R0201
   def _UnlockedGetInstanceNodes(self, instance, disks=None):
     """Get all disk-releated nodes for an instance.
@@ -471,7 +471,7 @@
 
     """
     all_nodes = [instance.primary_node]
-    inst_disks = instance.disks
+    inst_disks = self._UnlockedGetInstanceDisks(instance)
     if disks is not None:
       inst_disks.extend(disks)
     for disk in inst_disks:
@@ -546,7 +546,7 @@
       ret = None
 
     node_uuid = instance.primary_node
-    devs = instance.disks
+    devs = self._UnlockedGetInstanceDisks(instance)
     _MapLVsByNode(lvmap, devs, node_uuid)
     return ret
 
@@ -769,26 +769,6 @@
     lvnames.update(lv_list)
     return lvnames
 
-  def _AllDisks(self):
-    """Compute the list of all Disks (recursively, including children).
-
-    """
-    def DiskAndAllChildren(disk):
-      """Returns a list containing the given disk and all of his children.
-
-      """
-      disks = [disk]
-      if disk.children:
-        for child_disk in disk.children:
-          disks.extend(DiskAndAllChildren(child_disk))
-      return disks
-
-    disks = []
-    for instance in self._config_data.instances.values():
-      for disk in instance.disks:
-        disks.extend(DiskAndAllChildren(disk))
-    return disks
-
   def _AllNICs(self):
     """Compute the list of all NICs.
 
@@ -871,33 +851,9 @@
       helper(child, result)
 
     result = []
-    for instance in self._config_data.instances.values():
-      for disk in instance.disks:
-        helper(disk, result)
-
-    return result
-
-  def _CheckDiskIDs(self, disk, l_ids):
-    """Compute duplicate disk IDs
-
-    @type disk: L{objects.Disk}
-    @param disk: the disk at which to start searching
-    @type l_ids: list
-    @param l_ids: list of current logical ids
-    @rtype: list
-    @return: a list of error messages
-
-    """
-    result = []
-    if disk.logical_id is not None:
-      if disk.logical_id in l_ids:
-        result.append("duplicate logical id %s" % str(disk.logical_id))
-      else:
-        l_ids.append(disk.logical_id)
+    for disk in self._config_data.disks.values():
+      helper(disk, result)
 
-    if disk.children:
-      for child in disk.children:
-        result.extend(self._CheckDiskIDs(child, l_ids))
     return result
 
   def _UnlockedVerifyConfig(self):
@@ -914,7 +870,6 @@
     ports = {}
     data = self._config_data
     cluster = data.cluster
-    seen_lids = []
 
     # global cluster checks
     if not cluster.enabled_hypervisors:
@@ -1005,6 +960,20 @@
           )
         )
 
+    # per-disk checks
+    for disk_uuid in data.disks:
+      disk = data.disks[disk_uuid]
+      if disk.uuid != disk_uuid:
+        result.append("disk '%s' is indexed by wrong UUID '%s'" %
+                      (disk.name, disk_uuid))
+      if not disk.instance:
+        result.append("Disk '%s' is not attached to any instance" % disk.uuid)
+      if disk.instance not in data.instances:
+        result.append("disk '%s' is attached to invalid instance '%s'" %
+                      (disk.uuid, disk.instance))
+      result.extend(["disk '%s' error: %s" %
+                     (disk.uuid, msg) for msg in disk.Verify()])
+
     # per-instance checks
     for instance_uuid in data.instances:
       instance = data.instances[instance_uuid]
@@ -1041,8 +1010,15 @@
       _helper("instance %s" % instance.name, "beparams",
               cluster.FillBE(instance), constants.BES_PARAMETER_TYPES)
 
+      # check that disks exists
+      for disk_uuid in instance.disks:
+        if disk_uuid not in data.disks:
+          result.append("Instance '%s' has invalid disk '%s'" %
+                        (instance.name, disk_uuid))
+
+      inst_disks = self._UnlockedGetInstanceDisks(instance)
       # gather the drbd ports for duplicate checks
-      for (idx, dsk) in enumerate(instance.disks):
+      for (idx, dsk) in enumerate(inst_disks):
         if dsk.dev_type in constants.DTS_DRBD:
           tcp_port = dsk.logical_id[2]
           if tcp_port not in ports:
@@ -1055,13 +1031,7 @@
             ports[net_port] = []
           ports[net_port].append((instance.name, "network port"))
 
-      # instance disk verify
-      for idx, disk in enumerate(instance.disks):
-        result.extend(["instance '%s' disk %d error: %s" %
-                       (instance.name, idx, msg) for msg in disk.Verify()])
-        result.extend(self._CheckDiskIDs(disk, seen_lids))
-
-      wrong_names = _CheckInstanceDiskIvNames(instance.disks)
+      wrong_names = _CheckInstanceDiskIvNames(inst_disks)
       if wrong_names:
         tmp = "; ".join(("name of disk %s should be '%s', but is '%s'" %
                          (idx, exp_name, actual_name))
@@ -1280,7 +1250,7 @@
     duplicates = []
     my_dict = dict((node_uuid, {}) for node_uuid in self._config_data.nodes)
     for instance in self._config_data.instances.itervalues():
-      for disk in instance.disks:
+      for disk in self._UnlockedGetInstanceDisks(instance):
         duplicates.extend(_AppendUsedMinors(self._UnlockedGetNodeName,
                                             instance, disk, my_dict))
     for (node_uuid, minor), inst_uuid in self._temporary_drbds.iteritems():
@@ -1901,7 +1871,8 @@
     inst = self._config_data.instances[inst_uuid]
     inst.name = new_name
 
-    for (_, disk) in enumerate(inst.disks):
+    inst_disks = self._UnlockedGetInstanceDisks(inst)
+    for (_, disk) in enumerate(inst_disks):
       if disk.dev_type in [constants.DT_FILE, constants.DT_SHARED_FILE]:
         # rename the file paths in logical and physical id
         file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1]))
@@ -2647,7 +2618,7 @@
                       self._config_data.nodes.values() +
                       self._config_data.nodegroups.values() +
                       self._config_data.networks.values() +
-                      self._AllDisks() +
+                      self._config_data.disks.values() +
                       self._AllNICs() +
                       [self._config_data.cluster])
 
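
Note: the new per-disk and per-instance checks in _UnlockedVerifyConfig enforce
the invariants of the top-level disk container. A toy restatement with dicts
standing in for the config objects:

  data_disks = {"u1": {"uuid": "u1", "instance": "inst-A"}}
  data_instances = {"inst-A": {"disks": ["u1"]}}

  for disk_uuid, disk in data_disks.items():
    assert disk["uuid"] == disk_uuid           # indexed by its own UUID
    assert disk["instance"] in data_instances  # attached to a known instance
  for inst in data_instances.values():
    assert all(d in data_disks for d in inst["disks"])  # all UUIDs resolve
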
b/lib/masterd/iallocator.py
@@ -261,7 +261,8 @@
     raise errors.OpPrereqError("Instance has not exactly one secondary node",
                                errors.ECODE_STATE)
 
-  disk_sizes = [{constants.IDISK_SIZE: disk.size} for disk in instance.disks]
+  inst_disks = cfg.GetInstanceDisks(instance)
+  disk_sizes = [{constants.IDISK_SIZE: disk.size} for disk in inst_disks]
   disk_space = gmi.ComputeDiskSize(instance.disk_template, disk_sizes)
 
   return {
@@ -732,6 +733,7 @@
         if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
           nic_dict["bridge"] = filled_params[constants.NIC_LINK]
         nic_data.append(nic_dict)
+      inst_disks = cfg.GetInstanceDisks(iinfo)
       pir = {
         "tags": list(iinfo.GetTags()),
         "admin_state": iinfo.admin_state,
@@ -746,7 +748,7 @@
         "disks": [{constants.IDISK_SIZE: dsk.size,
                    constants.IDISK_MODE: dsk.mode,
                    constants.IDISK_SPINDLES: dsk.spindles}
-                  for dsk in iinfo.disks],
+                  for dsk in inst_disks],
         "disk_template": iinfo.disk_template,
         "disks_active": iinfo.disks_active,
         "hypervisor": iinfo.hypervisor,
b/lib/masterd/instance.py
@@ -1166,7 +1166,8 @@
     src_node = instance.primary_node
     src_node_name = self._lu.cfg.GetNodeName(src_node)
 
-    for idx, disk in enumerate(instance.disks):
+    inst_disks = self._lu.cfg.GetInstanceDisks(instance)
+    for idx, disk in enumerate(inst_disks):
       self._feedback_fn("Creating a snapshot of disk/%s on node %s" %
                         (idx, src_node_name))
 
@@ -1287,6 +1288,7 @@
 
     """
     instance = self._instance
+    inst_disks = self._lu.cfg.GetInstanceDisks(instance)
 
     assert len(disk_info) == len(instance.disks)
 
@@ -1294,7 +1296,7 @@
 
     ieloop = ImportExportLoop(self._lu)
     try:
-      for idx, (dev, (host, port, magic)) in enumerate(zip(instance.disks,
+      for idx, (dev, (host, port, magic)) in enumerate(zip(inst_disks,
                                                            disk_info)):
         # Decide whether to use IPv6
         ipv6 = netutils.IP6Address.IsValid(host)
@@ -1482,8 +1484,9 @@
                      len(instance.disks), pnode.primary_ip)
 
   ieloop = ImportExportLoop(lu)
+  inst_disks = lu.cfg.GetInstanceDisks(instance)
   try:
-    for idx, dev in enumerate(instance.disks):
+    for idx, dev in enumerate(inst_disks):
       magic = _GetInstDiskMagic(magic_base, instance.name, idx)
 
       # Import daemon options
b/lib/objects.py
@@ -438,10 +438,9 @@
     @return: boolean indicating if a disk of the given type was found or not
 
     """
-    for instance in self.instances.values():
-      for disk in instance.disks:
-        if disk.IsBasedOnDiskType(dev_type):
-          return True
+    for disk in self.disks.values():
+      if disk.IsBasedOnDiskType(dev_type):
+        return True
     return False
 
   def UpgradeConfig(self):
@@ -1116,8 +1115,8 @@
 
     @type idx: int
     @param idx: the disk index
-    @rtype: L{Disk}
-    @return: the corresponding disk
+    @rtype: string
+    @return: the corresponding disk's uuid
     @raise errors.OpPrereqError: when the given index is not valid
 
     """
@@ -1144,7 +1143,7 @@
     if _with_private:
       bo["osparams_private"] = self.osparams_private.Unprivate()
 
-    for attr in "nics", "disks", "disks_info":
+    for attr in "nics", "disks_info":
       alist = bo.get(attr, None)
       if alist:
         nlist = outils.ContainerToDicts(alist)
@@ -1167,7 +1166,6 @@
       del val["admin_up"]
     obj = super(Instance, cls).FromDict(val)
     obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC)
-    obj.disks = outils.ContainerFromDicts(obj.disks, list, Disk)
     obj.disks_info = outils.ContainerFromDicts(obj.disks_info, list, Disk)
     return obj
 
@@ -1177,8 +1175,6 @@
     """
     for nic in self.nics:
       nic.UpgradeConfig()
-    for disk in self.disks:
-      disk.UpgradeConfig()
     if self.hvparams:
       for key in constants.HVC_GLOBALS:
         try:
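
Note: per the docstring change above, Instance.FindDisk now returns the disk's
UUID, so callers chain it with ConfigWriter.GetDiskInfo (as LUInstanceGrowDisk
does in the instance_storage.py hunks). A toy sketch of the new contract:

  class FakeInstance(object):
    def __init__(self, disk_uuids):
      self.disks = disk_uuids        # list of UUID strings

    def FindDisk(self, idx):
      """Index -> UUID; the real method raises OpPrereqError on bad input."""
      return self.disks[idx]

  inst = FakeInstance(["uuid-0", "uuid-1"])
  assert inst.FindDisk(1) == "uuid-1"   # then: cfg.GetDiskInfo("uuid-1")
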
b/lib/rpc/node.py
@@ -886,7 +886,8 @@
     idict["osparams"] = cluster.SimpleFillOS(instance.os, instance.osparams)
     if osp is not None:
       idict["osparams"].update(osp)
-    idict["disks_info"] = self._DisksDictDP(node, (instance.disks, instance))
+    disks = self._cfg.GetInstanceDisks(instance)
+    idict["disks_info"] = self._DisksDictDP(node, (disks, instance))
     for nic in idict["nics"]:
       nic["nicparams"] = objects.FillDict(
         cluster.nicparams[constants.PP_DEFAULT],