Revision 5279c6a6 lib/cmdlib.py
b/lib/cmdlib.py | ||
---|---|---|
6621 | 6621 |
disk_index)), |
6622 | 6622 |
mode=disk["mode"]) |
6623 | 6623 |
disks.append(disk_dev) |
6624 |
elif template_name == constants.DT_BLOCK: |
|
6625 |
if len(secondary_nodes) != 0: |
|
6626 |
raise errors.ProgrammerError("Wrong template configuration") |
|
6627 |
|
|
6628 |
for idx, disk in enumerate(disk_info): |
|
6629 |
disk_index = idx + base_index |
|
6630 |
disk_dev = objects.Disk(dev_type=constants.LD_BLOCKDEV, size=disk["size"], |
|
6631 |
logical_id=(constants.BLOCKDEV_DRIVER_MANUAL, |
|
6632 |
disk["adopt"]), |
|
6633 |
iv_name="disk/%d" % disk_index, |
|
6634 |
mode=disk["mode"]) |
|
6635 |
disks.append(disk_dev) |
|
6636 |
|
|
6624 | 6637 |
else: |
6625 | 6638 |
raise errors.ProgrammerError("Invalid disk template '%s'" % template_name) |
6626 | 6639 |
return disks |
... | ... | |
6847 | 6860 |
constants.DT_DRBD8: sum(d["size"] + 128 for d in disks), |
6848 | 6861 |
constants.DT_FILE: None, |
6849 | 6862 |
constants.DT_SHARED_FILE: 0, |
6863 |
constants.DT_BLOCK: 0, |
|
6850 | 6864 |
} |
6851 | 6865 |
|
6852 | 6866 |
if disk_template not in req_size_dict: |
... | ... | |
6982 | 6996 |
if self.op.mode == constants.INSTANCE_IMPORT: |
6983 | 6997 |
raise errors.OpPrereqError("Disk adoption not allowed for" |
6984 | 6998 |
" instance import", errors.ECODE_INVAL) |
6999 |
else: |
|
7000 |
if self.op.disk_template in constants.DTS_MUST_ADOPT: |
|
7001 |
raise errors.OpPrereqError("Disk template %s requires disk adoption," |
|
7002 |
" but no 'adopt' parameter given" % |
|
7003 |
self.op.disk_template, |
|
7004 |
errors.ECODE_INVAL) |
|
6985 | 7005 |
|
6986 | 7006 |
self.adopt_disks = has_adopt |
6987 | 7007 |
|
... | ... | |
7584 | 7604 |
req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks) |
7585 | 7605 |
_CheckNodesFreeDiskPerVG(self, nodenames, req_sizes) |
7586 | 7606 |
|
7587 |
else: # instead, we must check the adoption data
|
|
7607 |
elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
|
|
7588 | 7608 |
all_lvs = set([i["vg"] + "/" + i["adopt"] for i in self.disks]) |
7589 | 7609 |
if len(all_lvs) != len(self.disks): |
7590 | 7610 |
raise errors.OpPrereqError("Duplicate volume names given for adoption", |
... | ... | |
7620 | 7640 |
for dsk in self.disks: |
7621 | 7641 |
dsk["size"] = int(float(node_lvs[dsk["vg"] + "/" + dsk["adopt"]][0])) |
7622 | 7642 |
|
7643 |
elif self.op.disk_template == constants.DT_BLOCK: |
|
7644 |
# Normalize and de-duplicate device paths |
|
7645 |
all_disks = set([os.path.abspath(i["adopt"]) for i in self.disks]) |
|
7646 |
if len(all_disks) != len(self.disks): |
|
7647 |
raise errors.OpPrereqError("Duplicate disk names given for adoption", |
|
7648 |
errors.ECODE_INVAL) |
|
7649 |
baddisks = [d for d in all_disks |
|
7650 |
if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)] |
|
7651 |
if baddisks: |
|
7652 |
raise errors.OpPrereqError("Device node(s) %s lie outside %s and" |
|
7653 |
" cannot be adopted" % |
|
7654 |
(", ".join(baddisks), |
|
7655 |
constants.ADOPTABLE_BLOCKDEV_ROOT), |
|
7656 |
errors.ECODE_INVAL) |
|
7657 |
|
|
7658 |
node_disks = self.rpc.call_bdev_sizes([pnode.name], |
|
7659 |
list(all_disks))[pnode.name] |
|
7660 |
node_disks.Raise("Cannot get block device information from node %s" % |
|
7661 |
pnode.name) |
|
7662 |
node_disks = node_disks.payload |
|
7663 |
delta = all_disks.difference(node_disks.keys()) |
|
7664 |
if delta: |
|
7665 |
raise errors.OpPrereqError("Missing block device(s): %s" % |
|
7666 |
utils.CommaJoin(delta), |
|
7667 |
errors.ECODE_INVAL) |
|
7668 |
for dsk in self.disks: |
|
7669 |
dsk["size"] = int(float(node_disks[dsk["adopt"]])) |
|
7670 |
|
|
7623 | 7671 |
_CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams) |
7624 | 7672 |
|
7625 | 7673 |
_CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant) |
... | ... | |
7691 | 7739 |
) |
7692 | 7740 |
|
7693 | 7741 |
if self.adopt_disks: |
7694 |
# rename LVs to the newly-generated names; we need to construct |
|
7695 |
# 'fake' LV disks with the old data, plus the new unique_id |
|
7696 |
tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks] |
|
7697 |
rename_to = [] |
|
7698 |
for t_dsk, a_dsk in zip (tmp_disks, self.disks): |
|
7699 |
rename_to.append(t_dsk.logical_id) |
|
7700 |
t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk["adopt"]) |
|
7701 |
self.cfg.SetDiskID(t_dsk, pnode_name) |
|
7702 |
result = self.rpc.call_blockdev_rename(pnode_name, |
|
7703 |
zip(tmp_disks, rename_to)) |
|
7704 |
result.Raise("Failed to rename adopted LVs") |
|
7742 |
if self.op.disk_template == constants.DT_PLAIN: |
|
7743 |
# rename LVs to the newly-generated names; we need to construct |
|
7744 |
# 'fake' LV disks with the old data, plus the new unique_id |
|
7745 |
tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks] |
|
7746 |
rename_to = [] |
|
7747 |
for t_dsk, a_dsk in zip (tmp_disks, self.disks): |
|
7748 |
rename_to.append(t_dsk.logical_id) |
|
7749 |
t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk["adopt"]) |
|
7750 |
self.cfg.SetDiskID(t_dsk, pnode_name) |
|
7751 |
result = self.rpc.call_blockdev_rename(pnode_name, |
|
7752 |
zip(tmp_disks, rename_to)) |
|
7753 |
result.Raise("Failed to rename adopted LVs") |
|
7705 | 7754 |
else: |
7706 | 7755 |
feedback_fn("* creating instance disks...") |
7707 | 7756 |
try: |
Also available in: Unified diff