Revision da4a52a3 lib/cmdlib/instance_storage.py
This change keys instance lookups, DRBD minor reservations, and the
disks-active flags by instance UUID instead of by instance name; the name
is kept only where a human-readable identifier is wanted (error messages,
node-level RPCs).

--- a/lib/cmdlib/instance_storage.py
+++ b/lib/cmdlib/instance_storage.py
@@ -408,7 +408,7 @@
 
 
 def GenerateDiskTemplate(
-  lu, template_name, instance_name, primary_node_uuid, secondary_node_uuids,
+  lu, template_name, instance_uuid, primary_node_uuid, secondary_node_uuids,
   disk_info, file_storage_dir, file_driver, base_index,
   feedback_fn, full_disk_params, _req_file_storage=opcodes.RequireFileStorage,
   _req_shr_file_storage=opcodes.RequireSharedFileStorage):
@@ -426,7 +426,7 @@
       raise errors.ProgrammerError("Wrong template configuration")
     remote_node_uuid = secondary_node_uuids[0]
     minors = lu.cfg.AllocateDRBDMinor(
-      [primary_node_uuid, remote_node_uuid] * len(disk_info), instance_name)
+      [primary_node_uuid, remote_node_uuid] * len(disk_info), instance_uuid)
 
     (drbd_params, _, _) = objects.Disk.ComputeLDParams(template_name,
                                                        full_disk_params)
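Why key the DRBD minor reservation by UUID? An instance name can change
over its lifetime (renames), while its UUID is stable, so reservations
keyed by UUID cannot be orphaned or mismatched by a rename. A minimal,
self-contained sketch of a rename-safe reservation table (toy code, not
Ganeti's ConfigWriter):

# Toy reservation table keyed by a stable owner key (the instance UUID).
# Illustrative only; Ganeti's real AllocateDRBDMinor lives elsewhere.

class MinorTable(object):
  def __init__(self):
    self._used = {}  # node_uuid -> {minor: owner_key}

  def allocate(self, node_uuids, owner_key):
    """Reserves the lowest free minor on each requested node."""
    minors = []
    for node_uuid in node_uuids:
      node_minors = self._used.setdefault(node_uuid, {})
      minor = 0
      while minor in node_minors:
        minor += 1
      node_minors[minor] = owner_key
      minors.append(minor)
    return minors

  def release(self, owner_key):
    """Drops all reservations held under owner_key (rollback path)."""
    for node_minors in self._used.values():
      for minor in [m for m, o in node_minors.items() if o == owner_key]:
        del node_minors[minor]

table = MinorTable()
# A later rename of the instance leaves this reservation valid, because
# the key is the UUID, not the name:
print(table.allocate(["node-A", "node-B"], "0aeb5075-example-uuid"))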
@@ -679,7 +679,7 @@
       # requires going via the node before it's locked, requiring
       # verification later on
       self.needed_locks[locking.LEVEL_NODEGROUP] = \
-        self.cfg.GetInstanceNodeGroups(self.op.instance_name, primary_only=True)
+        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid, primary_only=True)
 
     elif level == locking.LEVEL_NODE:
       # If an allocator is used, then we lock all the nodes in the current
@@ -726,7 +726,7 @@
     This checks that the instance is in the cluster and is not running.
 
     """
-    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
     assert instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
     if self.op.node_uuids:
@@ -755,7 +755,7 @@
     if owned_groups:
       # Node group locks are acquired only for the primary node (and only
       # when the allocator is used)
-      CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups,
+      CheckInstanceNodeGroups(self.cfg, instance.uuid, owned_groups,
                               primary_only=True)
 
     # if we replace nodes *and* the old primary is offline, we don't
@@ -828,7 +828,7 @@
       # have changed
       (_, _, old_port, _, _, old_secret) = disk.logical_id
       new_minors = self.cfg.AllocateDRBDMinor(self.op.node_uuids,
-                                              self.instance.name)
+                                              self.instance.uuid)
       new_id = (self.op.node_uuids[0], self.op.node_uuids[1], old_port,
                 new_minors[0], new_minors[1], old_secret)
       assert len(disk.logical_id) == len(new_id)
@@ -1193,7 +1193,7 @@
     ignored.
 
   """
-  lu.cfg.MarkInstanceDisksInactive(instance.name)
+  lu.cfg.MarkInstanceDisksInactive(instance.uuid)
   all_result = True
   disks = ExpandCheckDisks(instance, disks)
 
@@ -1248,7 +1248,6 @@
   """
   device_info = []
   disks_ok = True
-  iname = instance.name
   disks = ExpandCheckDisks(instance, disks)
 
   # With the two passes mechanism we try to reduce the window of
@@ -1262,7 +1261,7 @@
 
   # mark instance disks as active before doing actual work, so watcher does
   # not try to shut them down erroneously
-  lu.cfg.MarkInstanceDisksActive(iname)
+  lu.cfg.MarkInstanceDisksActive(instance.uuid)
 
   # 1st pass, assemble on all nodes in secondary mode
   for idx, inst_disk in enumerate(disks):
@@ -1273,7 +1272,7 @@
         node_disk.UnsetSize()
       lu.cfg.SetDiskID(node_disk, node_uuid)
       result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
-                                             iname, False, idx)
+                                             instance.name, False, idx)
       msg = result.fail_msg
       if msg:
         is_offline_secondary = (node_uuid in instance.secondary_nodes and
@@ -1299,7 +1298,7 @@
         node_disk.UnsetSize()
       lu.cfg.SetDiskID(node_disk, node_uuid)
       result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
-                                             iname, True, idx)
+                                             instance.name, True, idx)
       msg = result.fail_msg
       if msg:
         lu.LogWarning("Could not prepare block device %s on node %s"
@@ -1319,7 +1318,7 @@
     lu.cfg.SetDiskID(disk, instance.primary_node)
 
   if not disks_ok:
-    lu.cfg.MarkInstanceDisksInactive(iname)
+    lu.cfg.MarkInstanceDisksInactive(instance.uuid)
 
   return disks_ok, device_info
 
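Note the split these hunks leave in the disk-assembly path: the temporary
iname local is gone, the config calls (MarkInstanceDisksActive/Inactive)
now take instance.uuid, but the call_blockdev_assemble RPC still receives
instance.name, since the node side works with the human-readable name. A
small sketch of that division of labour, with hypothetical helpers
standing in for the config and RPC layers:

# Sketch: cluster-level state is keyed by the stable UUID, while the
# node-level call still gets the name. Both helpers below are invented
# stand-ins, not Ganeti APIs.

def mark_disks_active(config_state, instance):
  # config bookkeeping is keyed by UUID, so it survives renames
  config_state.setdefault("disks_active", set()).add(instance["uuid"])

def assemble_on_node(instance, idx):
  # the node-side identifier is the name, e.g. for device naming
  return "assemble %s disk/%d" % (instance["name"], idx)

state = {}
inst = {"uuid": "0aeb5075-example-uuid", "name": "web1.example.com"}
mark_disks_active(state, inst)
print(assemble_on_node(inst, 0))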
@@ -1389,20 +1388,18 @@
     This checks that the instance is in the cluster.
 
     """
-    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
-    assert instance is not None, \
+    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
+    assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
-    node_uuids = list(instance.all_nodes)
+    node_uuids = list(self.instance.all_nodes)
     for node_uuid in node_uuids:
       CheckNodeOnline(self, node_uuid)
 
-    self.instance = instance
-
-    if instance.disk_template not in constants.DTS_GROWABLE:
+    if self.instance.disk_template not in constants.DTS_GROWABLE:
       raise errors.OpPrereqError("Instance's disk layout does not support"
                                  " growing", errors.ECODE_INVAL)
 
-    self.disk = instance.FindDisk(self.op.disk)
+    self.disk = self.instance.FindDisk(self.op.disk)
 
     if self.op.absolute:
       self.target = self.op.amount
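This hunk also folds the separate instance = ... / self.instance =
instance pair into a single assignment; the lookup itself now goes
through the opcode's UUID. A condensed sketch of the same
lookup-and-validate flow, with a plain dict standing in for the config
(all names here are illustrative):

# Condensed illustration of the CheckPrereq flow above; "cfg" is a plain
# dict standing in for Ganeti's config object.

class OpPrereqError(Exception):
  pass

def check_grow_prereq(cfg, instance_uuid, growable_templates):
  instance = cfg["instances"].get(instance_uuid)
  assert instance is not None, \
    "Cannot retrieve locked instance %s" % instance_uuid
  if instance["disk_template"] not in growable_templates:
    raise OpPrereqError("Instance's disk layout does not support growing")
  return instance

cfg = {"instances": {"0aeb5075-example-uuid": {"disk_template": "plain"}}}
check_grow_prereq(cfg, "0aeb5075-example-uuid", frozenset(["plain", "drbd"]))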
@@ -1598,7 +1595,8 @@
 
     self.needed_locks[locking.LEVEL_NODE_RES] = []
 
-    self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
+    self.replacer = TLReplaceDisks(self, self.op.instance_uuid,
+                                   self.op.instance_name, self.op.mode,
                                    self.op.iallocator, self.op.remote_node_uuid,
                                    self.op.disks, self.op.early_release,
                                    self.op.ignore_ipolicy)
@@ -1615,7 +1613,7 @@
       # Lock all groups used by instance optimistically; this requires going
       # via the node before it's locked, requiring verification later on
       self.needed_locks[locking.LEVEL_NODEGROUP] = \
-        self.cfg.GetInstanceNodeGroups(self.op.instance_name)
+        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
 
     elif level == locking.LEVEL_NODE:
       if self.op.iallocator is not None:
@@ -1676,7 +1674,7 @@
     # Verify if node group locks are still correct
     owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
     if owned_groups:
-      CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
+      CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid, owned_groups)
 
     return LogicalUnit.CheckPrereq(self)
 
@@ -1702,7 +1700,7 @@
     This checks that the instance is in the cluster.
 
     """
-    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
     CheckNodeOnline(self, self.instance.primary_node)
@@ -1719,7 +1717,7 @@
 
     if self.op.wait_for_sync:
       if not WaitForSync(self, self.instance):
-        self.cfg.MarkInstanceDisksInactive(self.instance.name)
+        self.cfg.MarkInstanceDisksInactive(self.instance.uuid)
         raise errors.OpExecError("Some disks of the instance are degraded!")
 
     return disks_info
@@ -1746,7 +1744,7 @@
     This checks that the instance is in the cluster.
 
     """
-    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
 
@@ -1841,14 +1839,15 @@
   Note: Locking is not within the scope of this class.
 
   """
-  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node_uuid,
-               disks, early_release, ignore_ipolicy):
+  def __init__(self, lu, instance_uuid, instance_name, mode, iallocator_name,
+               remote_node_uuid, disks, early_release, ignore_ipolicy):
     """Initializes this class.
 
     """
     Tasklet.__init__(self, lu)
 
     # Parameters
+    self.instance_uuid = instance_uuid
     self.instance_name = instance_name
     self.mode = mode
     self.iallocator_name = iallocator_name
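TLReplaceDisks now carries both identifiers: the UUID as the lookup key
and the name purely for messages. A sketch of why both are kept
(illustrative class, not the real Tasklet machinery):

# Illustrative only: the UUID is the key used against the config, the
# name is retained for human-readable log and error output.

class ReplaceDisksSketch(object):
  def __init__(self, cfg, instance_uuid, instance_name):
    self.cfg = cfg
    self.instance_uuid = instance_uuid  # config key, stable
    self.instance_name = instance_name  # messages only

  def check_prereq(self):
    instance = self.cfg.get(self.instance_uuid)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.instance_name
    return instance

tl = ReplaceDisksSketch({"0aeb5075-example-uuid": {"name": "web1"}},
                        "0aeb5075-example-uuid", "web1")
tl.check_prereq()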
@@ -1866,13 +1865,13 @@
     self.node_secondary_ip = None
 
   @staticmethod
-  def _RunAllocator(lu, iallocator_name, instance_name,
+  def _RunAllocator(lu, iallocator_name, instance_uuid,
                     relocate_from_node_uuids):
     """Compute a new secondary node using an IAllocator.
 
     """
     req = iallocator.IAReqRelocate(
-      name=instance_name,
+      inst_uuid=instance_uuid,
       relocate_from_node_uuids=list(relocate_from_node_uuids))
     ial = iallocator.IAllocator(lu.cfg, lu.rpc, req)
 
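The allocator relocate request is likewise built around the UUID (name=
becomes inst_uuid=). A shape-only mock of the request; the real
IAReqRelocate in ganeti.iallocator carries more machinery:

# Shape-only mock of the relocate request; the field names follow the
# hunk above, everything else is illustrative.

import collections

IAReqRelocateSketch = collections.namedtuple(
  "IAReqRelocateSketch", ["inst_uuid", "relocate_from_node_uuids"])

req = IAReqRelocateSketch(
  inst_uuid="0aeb5075-example-uuid",
  relocate_from_node_uuids=["node-uuid-B"])
print(req)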
@@ -1891,7 +1890,7 @@
                                  remote_node_name, errors.ECODE_NOENT)
 
     lu.LogInfo("Selected new secondary for instance '%s': %s",
-               instance_name, remote_node_name)
+               instance_uuid, remote_node_name)
 
     return remote_node.uuid
 
@@ -1932,7 +1931,7 @@
     This checks that the instance is in the cluster.
 
     """
-    self.instance = self.cfg.GetInstanceInfo(self.instance_name)
+    self.instance = self.cfg.GetInstanceInfo(self.instance_uuid)
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.instance_name
 
@@ -1952,7 +1951,7 @@
       remote_node_uuid = self.remote_node_uuid
     else:
       remote_node_uuid = self._RunAllocator(self.lu, self.iallocator_name,
-                                            self.instance.name,
+                                            self.instance.uuid,
                                             self.instance.secondary_nodes)
 
     if remote_node_uuid is None:
@@ -2473,7 +2472,7 @@
     self.lu.LogStep(4, steps_total, "Changing drbd configuration")
     minors = self.cfg.AllocateDRBDMinor([self.new_node_uuid
                                          for _ in self.instance.disks],
-                                        self.instance.name)
+                                        self.instance.uuid)
     logging.debug("Allocated minors %r", minors)
 
     iv_names = {}
@@ -2512,7 +2511,7 @@
                              GetInstanceInfoText(self.instance), False,
                              excl_stor)
       except errors.GenericError:
-        self.cfg.ReleaseDRBDMinors(self.instance.name)
+        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
         raise
 
       # We have new devices, shutdown the drbd on the old secondary
@@ -2534,7 +2533,7 @@
       msg = result.fail_msg
       if msg:
         # detaches didn't succeed (unlikely)
-        self.cfg.ReleaseDRBDMinors(self.instance.name)
+        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
         raise errors.OpExecError("Can't detach the disks from the network on"
                                  " old node: %s" % (msg,))
 
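Both error paths above release the reserved minors under the same UUID
that AllocateDRBDMinor used; consistent keying is what makes the rollback
reliable. A minimal reserve-or-roll-back sketch (toy code, not the real
config methods):

# The key used to allocate must be the key used to release on failure.
# Everything here is a toy stand-in for the real config methods.

reservations = {}  # owner_key -> list of (node, minor)

def allocate_minors(nodes, owner_key):
  pairs = [(node, idx) for idx, node in enumerate(nodes)]
  reservations.setdefault(owner_key, []).extend(pairs)
  return [minor for _, minor in pairs]

def release_minors(owner_key):
  reservations.pop(owner_key, None)

def attach(node, minor):
  raise RuntimeError("simulated attach failure on %s:%d" % (node, minor))

owner = "0aeb5075-example-uuid"
minors = allocate_minors(["node-C"], owner)
try:
  attach("node-C", minors[0])
except RuntimeError:
  release_minors(owner)  # rollback keyed by the same stable UUID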