Revision 1c3231aa: lib/cmdlib/instance_storage.py

--- a/lib/cmdlib/instance_storage.py
+++ b/lib/cmdlib/instance_storage.py
@@ -38 +38 @@
 from ganeti import rpc
 from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, Tasklet
 from ganeti.cmdlib.common import INSTANCE_DOWN, INSTANCE_NOT_RUNNING, \
-  AnnotateDiskParams, CheckIAllocatorOrNode, ExpandNodeName, \
+  AnnotateDiskParams, CheckIAllocatorOrNode, ExpandNodeUuidAndName, \
   CheckNodeOnline, CheckInstanceNodeGroups, CheckInstanceState, \
-  IsExclusiveStorageEnabledNode, FindFaultyInstanceDisks
+  IsExclusiveStorageEnabledNode, FindFaultyInstanceDisks, GetWantedNodes
 from ganeti.cmdlib.instance_utils import GetInstanceInfoText, \
   CopyLockList, ReleaseLocks, CheckNodeVmCapable, \
   BuildInstanceHookEnvByObject, CheckNodeNotDrained, CheckTargetNodeIPolicy
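All of the hunks in this revision follow the same convention change: the disk helpers now receive a node UUID instead of a node name, and the name is only looked up when a message has to be shown. The two imports swapped in above (ExpandNodeUuidAndName, GetWantedNodes) are the entry points for that resolution. A minimal sketch of the calling pattern, assuming an LU-like object `lu` with the usual `cfg` attribute; the function and variable names below are illustrative, not part of the revision:

# Sketch only: the uuid-first convention used throughout this revision.
def _ResolveNodes(lu, op):
  # A single, possibly missing, UUID plus a user-supplied name is resolved
  # into both forms (as done for op.remote_node in the ReplaceDisks hunks).
  (op.remote_node_uuid, op.remote_node) = \
    ExpandNodeUuidAndName(lu.cfg, op.remote_node_uuid, op.remote_node)

  # A list of names (e.g. an iallocator result) is resolved into parallel
  # UUID and name lists (as done in the RecreateDisks hunks below).
  (op.node_uuids, op.nodes) = GetWantedNodes(lu, op.nodes)

  # Internally only UUIDs are passed on; names are recovered for messages.
  lu.LogInfo("Operating on node %s", lu.cfg.GetNodeName(op.remote_node_uuid))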
@@ -65 +65 @@
   }
 
 
-def CreateSingleBlockDev(lu, node, instance, device, info, force_open,
+def CreateSingleBlockDev(lu, node_uuid, instance, device, info, force_open,
                          excl_stor):
   """Create a single block device on a given node.
 
@@ -73 +73 @@
   created in advance.
 
   @param lu: the lu on whose behalf we execute
-  @param node: the node on which to create the device
+  @param node_uuid: the node on which to create the device
   @type instance: L{objects.Instance}
   @param instance: the instance which owns the device
   @type device: L{objects.Disk}
@@ -89 +89 @@
   @param excl_stor: Whether exclusive_storage is active for the node
 
   """
-  lu.cfg.SetDiskID(device, node)
-  result = lu.rpc.call_blockdev_create(node, device, device.size,
+  lu.cfg.SetDiskID(device, node_uuid)
+  result = lu.rpc.call_blockdev_create(node_uuid, device, device.size,
                                        instance.name, force_open, info,
                                        excl_stor)
   result.Raise("Can't create block device %s on"
-               " node %s for instance %s" % (device, node, instance.name))
+               " node %s for instance %s" % (device,
+                                             lu.cfg.GetNodeName(node_uuid),
+                                             instance.name))
   if device.physical_id is None:
     device.physical_id = result.payload
 
 
-def _CreateBlockDevInner(lu, node, instance, device, force_create,
+def _CreateBlockDevInner(lu, node_uuid, instance, device, force_create,
                          info, force_open, excl_stor):
   """Create a tree of block devices on a given node.
 
@@ -111 +113 @@
   @attention: The device has to be annotated already.
 
   @param lu: the lu on whose behalf we execute
-  @param node: the node on which to create the device
+  @param node_uuid: the node on which to create the device
   @type instance: L{objects.Instance}
   @param instance: the instance which owns the device
   @type device: L{objects.Disk}
@@ -139 +141 @@
 
     if device.children:
       for child in device.children:
-        devs = _CreateBlockDevInner(lu, node, instance, child, force_create,
-                                    info, force_open, excl_stor)
+        devs = _CreateBlockDevInner(lu, node_uuid, instance, child,
+                                    force_create, info, force_open, excl_stor)
         created_devices.extend(devs)
 
     if not force_create:
       return created_devices
 
-    CreateSingleBlockDev(lu, node, instance, device, info, force_open,
+    CreateSingleBlockDev(lu, node_uuid, instance, device, info, force_open,
                          excl_stor)
     # The device has been completely created, so there is no point in keeping
     # its subdevices in the list. We just add the device itself instead.
-    created_devices = [(node, device)]
+    created_devices = [(node_uuid, device)]
     return created_devices
 
   except errors.DeviceCreationError, e:
@@ -160 +162 @@
     raise errors.DeviceCreationError(str(e), created_devices)
 
 
-def IsExclusiveStorageEnabledNodeName(cfg, nodename):
+def IsExclusiveStorageEnabledNodeUuid(cfg, node_uuid):
   """Whether exclusive_storage is in effect for the given node.
 
   @type cfg: L{config.ConfigWriter}
   @param cfg: The cluster configuration
-  @type nodename: string
-  @param nodename: The node
+  @type node_uuid: string
+  @param node_uuid: The node UUID
   @rtype: bool
   @return: The effective value of exclusive_storage
   @raise errors.OpPrereqError: if no node exists with the given name
 
   """
-  ni = cfg.GetNodeInfo(nodename)
+  ni = cfg.GetNodeInfo(node_uuid)
   if ni is None:
-    raise errors.OpPrereqError("Invalid node name %s" % nodename,
+    raise errors.OpPrereqError("Invalid node UUID %s" % node_uuid,
                                errors.ECODE_NOENT)
   return IsExclusiveStorageEnabledNode(cfg, ni)
 
 
-def _CreateBlockDev(lu, node, instance, device, force_create, info,
+def _CreateBlockDev(lu, node_uuid, instance, device, force_create, info,
                     force_open):
   """Wrapper around L{_CreateBlockDevInner}.
 
@@ -187 +189 @@
 
   """
   (disk,) = AnnotateDiskParams(instance, [device], lu.cfg)
-  excl_stor = IsExclusiveStorageEnabledNodeName(lu.cfg, node)
-  return _CreateBlockDevInner(lu, node, instance, disk, force_create, info,
+  excl_stor = IsExclusiveStorageEnabledNodeUuid(lu.cfg, node_uuid)
+  return _CreateBlockDevInner(lu, node_uuid, instance, disk, force_create, info,
                               force_open, excl_stor)
 
 
@@ -203 +205 @@
   @param disks_created: the result returned by L{CreateDisks}
 
   """
-  for (node, disk) in disks_created:
-    lu.cfg.SetDiskID(disk, node)
-    result = lu.rpc.call_blockdev_remove(node, disk)
+  for (node_uuid, disk) in disks_created:
+    lu.cfg.SetDiskID(disk, node_uuid)
+    result = lu.rpc.call_blockdev_remove(node_uuid, disk)
     result.Warn("Failed to remove newly-created disk %s on node %s" %
-                (disk, node), logging.warning)
+                (disk, lu.cfg.GetNodeName(node_uuid)), logging.warning)
 
 
-def CreateDisks(lu, instance, to_skip=None, target_node=None, disks=None):
+def CreateDisks(lu, instance, to_skip=None, target_node_uuid=None, disks=None):
   """Create all disks for an instance.
 
   This abstracts away some work from AddInstance.
@@ -221 +223 @@
   @param instance: the instance whose disks we should create
   @type to_skip: list
   @param to_skip: list of indices to skip
-  @type target_node: string
-  @param target_node: if passed, overrides the target node for creation
+  @type target_node_uuid: string
+  @param target_node_uuid: if passed, overrides the target node for creation
   @type disks: list of {objects.Disk}
   @param disks: the disks to create; if not specified, all the disks of the
     instance are created
@@ -232 +234 @@
 
   """
   info = GetInstanceInfoText(instance)
-  if target_node is None:
-    pnode = instance.primary_node
-    all_nodes = instance.all_nodes
+  if target_node_uuid is None:
+    pnode_uuid = instance.primary_node
+    all_node_uuids = instance.all_nodes
   else:
-    pnode = target_node
-    all_nodes = [pnode]
+    pnode_uuid = target_node_uuid
+    all_node_uuids = [pnode_uuid]
 
   if disks is None:
     disks = instance.disks
 
   if instance.disk_template in constants.DTS_FILEBASED:
     file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
-    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
+    result = lu.rpc.call_file_storage_dir_create(pnode_uuid, file_storage_dir)
 
     result.Raise("Failed to create directory '%s' on"
-                 " node %s" % (file_storage_dir, pnode))
+                 " node %s" % (file_storage_dir,
+                               lu.cfg.GetNodeName(pnode_uuid)))
 
   disks_created = []
   for idx, device in enumerate(disks):
     if to_skip and idx in to_skip:
       continue
     logging.info("Creating disk %s for instance '%s'", idx, instance.name)
-    for node in all_nodes:
-      f_create = node == pnode
+    for node_uuid in all_node_uuids:
+      f_create = node_uuid == pnode_uuid
       try:
-        _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
-        disks_created.append((node, device))
+        _CreateBlockDev(lu, node_uuid, instance, device, f_create, info,
+                        f_create)
+        disks_created.append((node_uuid, device))
       except errors.DeviceCreationError, e:
         logging.warning("Creating disk %s for instance '%s' failed",
                         idx, instance.name)
@@ -375 +379 @@
     pass
 
 
-def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
+def _GenerateDRBD8Branch(lu, primary_uuid, secondary_uuid, size, vgnames, names,
                          iv_name, p_minor, s_minor):
   """Generate a drbd8 device complete with its children.
 
@@ -394 +398 @@
                              params={})
   dev_meta.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
   drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
-                          logical_id=(primary, secondary, port,
+                          logical_id=(primary_uuid, secondary_uuid, port,
                                       p_minor, s_minor,
                                       shared_secret),
                           children=[dev_data, dev_meta],
@@ -404 +408 @@
 
 
 def GenerateDiskTemplate(
-  lu, template_name, instance_name, primary_node, secondary_nodes,
+  lu, template_name, instance_name, primary_node_uuid, secondary_node_uuids,
   disk_info, file_storage_dir, file_driver, base_index,
   feedback_fn, full_disk_params, _req_file_storage=opcodes.RequireFileStorage,
   _req_shr_file_storage=opcodes.RequireSharedFileStorage):
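The CreateDisks hunk above now keeps its bookkeeping as (node_uuid, device) pairs, and the matching _UndoCreateDisks hunk consumes exactly those pairs. A minimal sketch of that undo loop, using only calls that appear in this revision (SetDiskID, call_blockdev_remove, GetNodeName); the wrapper name is illustrative:

# Sketch only: undoing a partial disk creation from (node_uuid, device) pairs,
# mirroring the _UndoCreateDisks/CreateDisks hunks above.
def _CleanupCreatedDisks(lu, disks_created):
  for (node_uuid, disk) in disks_created:
    # Point the disk object at the right node before issuing the RPC.
    lu.cfg.SetDiskID(disk, node_uuid)
    result = lu.rpc.call_blockdev_remove(node_uuid, disk)
    # Only the warning needs the human-readable node name.
    result.Warn("Failed to remove newly-created disk %s on node %s" %
                (disk, lu.cfg.GetNodeName(node_uuid)), logging.warning)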
@@ -418 +422 @@
   if template_name == constants.DT_DISKLESS:
     pass
   elif template_name == constants.DT_DRBD8:
-    if len(secondary_nodes) != 1:
+    if len(secondary_node_uuids) != 1:
       raise errors.ProgrammerError("Wrong template configuration")
-    remote_node = secondary_nodes[0]
+    remote_node_uuid = secondary_node_uuids[0]
     minors = lu.cfg.AllocateDRBDMinor(
-      [primary_node, remote_node] * len(disk_info), instance_name)
+      [primary_node_uuid, remote_node_uuid] * len(disk_info), instance_name)
 
     (drbd_params, _, _) = objects.Disk.ComputeLDParams(template_name,
                                                        full_disk_params)
@@ -437 +441 @@
       disk_index = idx + base_index
       data_vg = disk.get(constants.IDISK_VG, vgname)
       meta_vg = disk.get(constants.IDISK_METAVG, drbd_default_metavg)
-      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
+      disk_dev = _GenerateDRBD8Branch(lu, primary_node_uuid, remote_node_uuid,
                                       disk[constants.IDISK_SIZE],
                                       [data_vg, meta_vg],
                                       names[idx * 2:idx * 2 + 2],
@@ -447 +451 @@
       disk_dev.name = disk.get(constants.IDISK_NAME, None)
       disks.append(disk_dev)
   else:
-    if secondary_nodes:
+    if secondary_node_uuids:
       raise errors.ProgrammerError("Wrong template configuration")
 
     if template_name == constants.DT_FILE:
@@ -619 +623 @@
                                  " %s" % (self.op.iallocator, ial.info),
                                  errors.ECODE_NORES)
 
-    self.op.nodes = ial.result
+    (self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, ial.result)
     self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                  self.op.instance_name, self.op.iallocator,
-                 utils.CommaJoin(ial.result))
+                 utils.CommaJoin(self.op.nodes))
 
   def CheckArguments(self):
     if self.op.disks and ht.TNonNegativeInt(self.op.disks[0]):
@@ -654 +658 @@
       self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
 
     if self.op.nodes:
-      self.op.nodes = [ExpandNodeName(self.cfg, n) for n in self.op.nodes]
-      self.needed_locks[locking.LEVEL_NODE] = list(self.op.nodes)
+      (self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, self.op.nodes)
+      self.needed_locks[locking.LEVEL_NODE] = list(self.op.node_uuids)
     else:
       self.needed_locks[locking.LEVEL_NODE] = []
       if self.op.iallocator:
@@ -725 +729 @@
     instance = self.cfg.GetInstanceInfo(self.op.instance_name)
     assert instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
-    if self.op.nodes:
-      if len(self.op.nodes) != len(instance.all_nodes):
+    if self.op.node_uuids:
+      if len(self.op.node_uuids) != len(instance.all_nodes):
         raise errors.OpPrereqError("Instance %s currently has %d nodes, but"
                                    " %d replacement nodes were specified" %
                                    (instance.name, len(instance.all_nodes),
-                                    len(self.op.nodes)),
+                                    len(self.op.node_uuids)),
                                    errors.ECODE_INVAL)
       assert instance.disk_template != constants.DT_DRBD8 or \
-             len(self.op.nodes) == 2
+             len(self.op.node_uuids) == 2
       assert instance.disk_template != constants.DT_PLAIN or \
-             len(self.op.nodes) == 1
-      primary_node = self.op.nodes[0]
+             len(self.op.node_uuids) == 1
+      primary_node = self.op.node_uuids[0]
     else:
       primary_node = instance.primary_node
     if not self.op.iallocator:
@@ -757 +761 @@
     # if we replace nodes *and* the old primary is offline, we don't
     # check the instance state
     old_pnode = self.cfg.GetNodeInfo(instance.primary_node)
-    if not ((self.op.iallocator or self.op.nodes) and old_pnode.offline):
+    if not ((self.op.iallocator or self.op.node_uuids) and old_pnode.offline):
       CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                          msg="cannot recreate disks")
 
@@ -771 +775 @@
       raise errors.OpPrereqError("Invalid disk index '%s'" % maxidx,
                                  errors.ECODE_INVAL)
 
-    if ((self.op.nodes or self.op.iallocator) and
+    if ((self.op.node_uuids or self.op.iallocator) and
          sorted(self.disks.keys()) != range(len(instance.disks))):
       raise errors.OpPrereqError("Can't recreate disks partially and"
                                  " change the nodes at the same time",
@@ -782 +786 @@
     if self.op.iallocator:
       self._RunAllocator()
       # Release unneeded node and node resource locks
-      ReleaseLocks(self, locking.LEVEL_NODE, keep=self.op.nodes)
-      ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=self.op.nodes)
+      ReleaseLocks(self, locking.LEVEL_NODE, keep=self.op.node_uuids)
+      ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=self.op.node_uuids)
       ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
 
     assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
 
-    if self.op.nodes:
-      nodes = self.op.nodes
+    if self.op.node_uuids:
+      node_uuids = self.op.node_uuids
     else:
-      nodes = instance.all_nodes
+      node_uuids = instance.all_nodes
     excl_stor = compat.any(
-      rpc.GetExclusiveStorageForNodeNames(self.cfg, nodes).values()
+      rpc.GetExclusiveStorageForNodes(self.cfg, node_uuids).values()
       )
     for new_params in self.disks.values():
       CheckSpindlesExclusiveStorage(new_params, excl_stor, False)
@@ -819 +823 @@
         continue
 
       # update secondaries for disks, if needed
-      if self.op.nodes and disk.dev_type == constants.LD_DRBD8:
+      if self.op.node_uuids and disk.dev_type == constants.LD_DRBD8:
         # need to update the nodes and minors
-        assert len(self.op.nodes) == 2
+        assert len(self.op.node_uuids) == 2
         assert len(disk.logical_id) == 6 # otherwise disk internals
                                          # have changed
         (_, _, old_port, _, _, old_secret) = disk.logical_id
-        new_minors = self.cfg.AllocateDRBDMinor(self.op.nodes, instance.name)
-        new_id = (self.op.nodes[0], self.op.nodes[1], old_port,
+        new_minors = self.cfg.AllocateDRBDMinor(self.op.node_uuids,
+                                                instance.name)
+        new_id = (self.op.node_uuids[0], self.op.node_uuids[1], old_port,
                   new_minors[0], new_minors[1], old_secret)
         assert len(disk.logical_id) == len(new_id)
       else:
@@ -847 +852 @@
                   spindles=changes.get(constants.IDISK_SPINDLES, None))
 
     # change primary node, if needed
-    if self.op.nodes:
-      instance.primary_node = self.op.nodes[0]
+    if self.op.node_uuids:
+      instance.primary_node = self.op.node_uuids[0]
       self.LogWarning("Changing the instance's nodes, you will have to"
                       " remove any disks left on the older nodes manually")
 
-    if self.op.nodes:
+    if self.op.node_uuids:
       self.cfg.Update(instance, feedback_fn)
 
     # All touched nodes must be locked
@@ -868 +873 @@
     WipeOrCleanupDisks(self, instance, disks=wipedisks, cleanup=new_disks)
 
 
-def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
+def _CheckNodesFreeDiskOnVG(lu, node_uuids, vg, requested):
   """Checks if nodes have enough free disk space in the specified VG.
 
   This function checks if all given nodes have the needed amount of
@@ -878 +883 @@
 
   @type lu: C{LogicalUnit}
   @param lu: a logical unit from which we get configuration data
-  @type nodenames: C{list}
-  @param nodenames: the list of node names to check
+  @type node_uuids: C{list}
+  @param node_uuids: the list of node UUIDs to check
   @type vg: C{str}
   @param vg: the volume group to check
   @type requested: C{int}
@@ -888 +893 @@
     or we cannot check the node
 
   """
-  es_flags = rpc.GetExclusiveStorageForNodeNames(lu.cfg, nodenames)
+  es_flags = rpc.GetExclusiveStorageForNodes(lu.cfg, node_uuids)
   # FIXME: This maps everything to storage type 'lvm-vg' to maintain
   # the current functionality. Refactor to make it more flexible.
   hvname = lu.cfg.GetHypervisorType()
   hvparams = lu.cfg.GetClusterInfo().hvparams
-  nodeinfo = lu.rpc.call_node_info(nodenames, [(constants.ST_LVM_VG, vg)],
+  nodeinfo = lu.rpc.call_node_info(node_uuids, [(constants.ST_LVM_VG, vg)],
                                    [(hvname, hvparams[hvname])], es_flags)
-  for node in nodenames:
+  for node in node_uuids:
+    node_name = lu.cfg.GetNodeName(node)
+
     info = nodeinfo[node]
-    info.Raise("Cannot get current information from node %s" % node,
+    info.Raise("Cannot get current information from node %s" % node_name,
                prereq=True, ecode=errors.ECODE_ENVIRON)
     (_, (vg_info, ), _) = info.payload
     vg_free = vg_info.get("vg_free", None)
     if not isinstance(vg_free, int):
       raise errors.OpPrereqError("Can't compute free disk space on node"
                                  " %s for vg %s, result was '%s'" %
-                                 (node, vg, vg_free), errors.ECODE_ENVIRON)
+                                 (node_name, vg, vg_free), errors.ECODE_ENVIRON)
     if requested > vg_free:
       raise errors.OpPrereqError("Not enough disk space on target node %s"
                                  " vg %s: required %d MiB, available %d MiB" %
-                                 (node, vg, requested, vg_free),
+                                 (node_name, vg, requested, vg_free),
                                  errors.ECODE_NORES)
 
 
-def CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
+def CheckNodesFreeDiskPerVG(lu, node_uuids, req_sizes):
   """Checks if nodes have enough free disk space in all the VGs.
 
   This function checks if all given nodes have the needed amount of
@@ -922 +929 @@
 
   @type lu: C{LogicalUnit}
   @param lu: a logical unit from which we get configuration data
-  @type nodenames: C{list}
-  @param nodenames: the list of node names to check
+  @type node_uuids: C{list}
+  @param node_uuids: the list of node UUIDs to check
   @type req_sizes: C{dict}
   @param req_sizes: the hash of vg and corresponding amount of disk in
     MiB to check for
@@ -932 +939 @@
 
   """
   for vg, req_size in req_sizes.items():
-    _CheckNodesFreeDiskOnVG(lu, nodenames, vg, req_size)
+    _CheckNodesFreeDiskOnVG(lu, node_uuids, vg, req_size)
 
 
 def _DiskSizeInBytesToMebibytes(lu, size):
@@ -977 +984 @@
     start offset
 
   """
-  node = instance.primary_node
+  node_uuid = instance.primary_node
+  node_name = lu.cfg.GetNodeName(node_uuid)
 
   if disks is None:
     disks = [(idx, disk, 0)
              for (idx, disk) in enumerate(instance.disks)]
 
   for (_, device, _) in disks:
-    lu.cfg.SetDiskID(device, node)
+    lu.cfg.SetDiskID(device, node_uuid)
 
   logging.info("Pausing synchronization of disks of instance '%s'",
                instance.name)
-  result = lu.rpc.call_blockdev_pause_resume_sync(node,
+  result = lu.rpc.call_blockdev_pause_resume_sync(node_uuid,
                                                   (map(compat.snd, disks),
                                                    instance),
                                                   True)
-  result.Raise("Failed to pause disk synchronization on node '%s'" % node)
+  result.Raise("Failed to pause disk synchronization on node '%s'" % node_name)
 
   for idx, success in enumerate(result.payload):
     if not success:
@@ -1021 +1029 @@
     lu.LogInfo("* Wiping disk %s%s", idx, info_text)
 
     logging.info("Wiping disk %d for instance %s on node %s using"
-                 " chunk size %s", idx, instance.name, node, wipe_chunk_size)
+                 " chunk size %s", idx, instance.name, node_name,
+                 wipe_chunk_size)
 
     while offset < size:
       wipe_size = min(wipe_chunk_size, size - offset)
@@ -1029 +1038 @@
       logging.debug("Wiping disk %d, offset %s, chunk %s",
                     idx, offset, wipe_size)
 
-      result = lu.rpc.call_blockdev_wipe(node, (device, instance), offset,
-                                         wipe_size)
+      result = lu.rpc.call_blockdev_wipe(node_uuid, (device, instance),
+                                         offset, wipe_size)
       result.Raise("Could not wipe disk %d at offset %d for size %d" %
                    (idx, offset, wipe_size))
 
@@ -1045 +1054 @@
   logging.info("Resuming synchronization of disks for instance '%s'",
                instance.name)
 
-  result = lu.rpc.call_blockdev_pause_resume_sync(node,
+  result = lu.rpc.call_blockdev_pause_resume_sync(node_uuid,
                                                   (map(compat.snd, disks),
                                                    instance),
                                                   False)
 
   if result.fail_msg:
     lu.LogWarning("Failed to resume disk synchronization on node '%s': %s",
-                  node, result.fail_msg)
+                  node_name, result.fail_msg)
   else:
     for idx, success in enumerate(result.payload):
       if not success:
@@ -1113 +1122 @@
   if not oneshot:
     lu.LogInfo("Waiting for instance %s to sync disks", instance.name)
 
-  node = instance.primary_node
+  node_uuid = instance.primary_node
+  node_name = lu.cfg.GetNodeName(node_uuid)
 
   for dev in disks:
-    lu.cfg.SetDiskID(dev, node)
+    lu.cfg.SetDiskID(dev, node_uuid)
 
   # TODO: Convert to utils.Retry
 
@@ -1126 +1136 @@
     max_time = 0
     done = True
     cumul_degraded = False
-    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, (disks, instance))
+    rstats = lu.rpc.call_blockdev_getmirrorstatus(node_uuid, (disks, instance))
     msg = rstats.fail_msg
     if msg:
-      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
+      lu.LogWarning("Can't get any data from node %s: %s", node_name, msg)
       retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
-                                 " aborting." % node)
+                                 " aborting." % node_name)
      time.sleep(6)
      continue
    rstats = rstats.payload
@@ -1141 +1151 @@
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
-                      node, disks[i].iv_name)
+                      node_name, disks[i].iv_name)
        continue

      cumul_degraded = (cumul_degraded or
@@ -1191 +1201 @@
  disks = ExpandCheckDisks(instance, disks)

  for disk in disks:
-    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
-      lu.cfg.SetDiskID(top_disk, node)
-      result = lu.rpc.call_blockdev_shutdown(node, (top_disk, instance))
+    for node_uuid, top_disk in disk.ComputeNodeTree(instance.primary_node):
+      lu.cfg.SetDiskID(top_disk, node_uuid)
+      result = lu.rpc.call_blockdev_shutdown(node_uuid, (top_disk, instance))
      msg = result.fail_msg
      if msg:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
-                      disk.iv_name, node, msg)
-        if ((node == instance.primary_node and not ignore_primary) or
-            (node != instance.primary_node and not result.offline)):
+                      disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
+        if ((node_uuid == instance.primary_node and not ignore_primary) or
+            (node_uuid != instance.primary_node and not result.offline)):
          all_result = False
  return all_result

@@ -1259 +1269 @@

  # 1st pass, assemble on all nodes in secondary mode
  for idx, inst_disk in enumerate(disks):
-    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
+    for node_uuid, node_disk in inst_disk.ComputeNodeTree(
+                                  instance.primary_node):
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
-      lu.cfg.SetDiskID(node_disk, node)
-      result = lu.rpc.call_blockdev_assemble(node, (node_disk, instance), iname,
-                                             False, idx)
+      lu.cfg.SetDiskID(node_disk, node_uuid)
+      result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
+                                             iname, False, idx)
      msg = result.fail_msg
      if msg:
-        is_offline_secondary = (node in instance.secondary_nodes and
+        is_offline_secondary = (node_uuid in instance.secondary_nodes and
                                result.offline)
        lu.LogWarning("Could not prepare block device %s on node %s"
                      " (is_primary=False, pass=1): %s",
-                      inst_disk.iv_name, node, msg)
+                      inst_disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
        if not (ignore_secondaries or is_offline_secondary):
          disks_ok = False

@@ -1282 +1293 @@
  for idx, inst_disk in enumerate(disks):
    dev_path = None

-    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
-      if node != instance.primary_node:
+    for node_uuid, node_disk in inst_disk.ComputeNodeTree(
+                                  instance.primary_node):
+      if node_uuid != instance.primary_node:
        continue
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
-      lu.cfg.SetDiskID(node_disk, node)
-      result = lu.rpc.call_blockdev_assemble(node, (node_disk, instance), iname,
-                                             True, idx)
+      lu.cfg.SetDiskID(node_disk, node_uuid)
+      result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
+                                             iname, True, idx)
      msg = result.fail_msg
      if msg:
        lu.LogWarning("Could not prepare block device %s on node %s"
                      " (is_primary=True, pass=2): %s",
-                      inst_disk.iv_name, node, msg)
+                      inst_disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
        disks_ok = False
      else:
        dev_path = result.payload

-    device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))
+    device_info.append((lu.cfg.GetNodeName(instance.primary_node),
+                        inst_disk.iv_name, dev_path))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
@@ -1382 +1395 @@
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
-    nodenames = list(instance.all_nodes)
-    for node in nodenames:
-      CheckNodeOnline(self, node)
+    node_uuids = list(instance.all_nodes)
+    for node_uuid in node_uuids:
+      CheckNodeOnline(self, node_uuid)

    self.instance = instance

@@ -1411 +1424 @@
                                 utils.FormatUnit(self.delta, "h"),
                                 errors.ECODE_INVAL)

-    self._CheckDiskSpace(nodenames, self.disk.ComputeGrowth(self.delta))
+    self._CheckDiskSpace(node_uuids, self.disk.ComputeGrowth(self.delta))

-  def _CheckDiskSpace(self, nodenames, req_vgspace):
+  def _CheckDiskSpace(self, node_uuids, req_vgspace):
    template = self.instance.disk_template
    if template not in (constants.DTS_NO_FREE_SPACE_CHECK):
      # TODO: check the free disk space for file, when that feature will be
      # supported
-      nodes = map(self.cfg.GetNodeInfo, nodenames)
+      nodes = map(self.cfg.GetNodeInfo, node_uuids)
      es_nodes = filter(lambda n: IsExclusiveStorageEnabledNode(self.cfg, n),
                        nodes)
      if es_nodes:
@@ -1426 +1439 @@
        # at free space; for now, let's simply abort the operation.
        raise errors.OpPrereqError("Cannot grow disks when exclusive_storage"
                                   " is enabled", errors.ECODE_STATE)
-      CheckNodesFreeDiskPerVG(self, nodenames, req_vgspace)
+      CheckNodesFreeDiskPerVG(self, node_uuids, req_vgspace)

  def Exec(self, feedback_fn):
    """Execute disk grow.
@@ -1451 +1464 @@
                        utils.FormatUnit(self.target, "h")))

    # First run all grow ops in dry-run mode
-    for node in instance.all_nodes:
-      self.cfg.SetDiskID(disk, node)
-      result = self.rpc.call_blockdev_grow(node, (disk, instance), self.delta,
-                                           True, True)
-      result.Raise("Dry-run grow request failed to node %s" % node)
+    for node_uuid in instance.all_nodes:
+      self.cfg.SetDiskID(disk, node_uuid)
+      result = self.rpc.call_blockdev_grow(node_uuid, (disk, instance),
+                                           self.delta, True, True)
+      result.Raise("Dry-run grow request failed to node %s" %
+                   self.cfg.GetNodeName(node_uuid))

    if wipe_disks:
      # Get disk size from primary node for wiping
@@ -1481 +1495 @@

    # We know that (as far as we can test) operations across different
    # nodes will succeed, time to run it for real on the backing storage
-    for node in instance.all_nodes:
-      self.cfg.SetDiskID(disk, node)
-      result = self.rpc.call_blockdev_grow(node, (disk, instance), self.delta,
-                                           False, True)
-      result.Raise("Grow request failed to node %s" % node)
+    for node_uuid in instance.all_nodes:
+      self.cfg.SetDiskID(disk, node_uuid)
+      result = self.rpc.call_blockdev_grow(node_uuid, (disk, instance),
+                                           self.delta, False, True)
+      result.Raise("Grow request failed to node %s" %
+                   self.cfg.GetNodeName(node_uuid))

    # And now execute it for logical storage, on the primary node
-    node = instance.primary_node
-    self.cfg.SetDiskID(disk, node)
-    result = self.rpc.call_blockdev_grow(node, (disk, instance), self.delta,
-                                         False, False)
-    result.Raise("Grow request failed to node %s" % node)
+    node_uuid = instance.primary_node
+    self.cfg.SetDiskID(disk, node_uuid)
+    result = self.rpc.call_blockdev_grow(node_uuid, (disk, instance),
+                                         self.delta, False, False)
+    result.Raise("Grow request failed to node %s" %
+                 self.cfg.GetNodeName(node_uuid))

    disk.RecordGrow(self.delta)
    self.cfg.Update(instance, feedback_fn)
@@ -1567 +1583 @@
      "Conflicting options"

    if self.op.remote_node is not None:
-      self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
+      (self.op.remote_node_uuid, self.op.remote_node) = \
+        ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
+                              self.op.remote_node)

      # Warning: do not remove the locking of the new secondary here
      # unless DRBD8Dev.AddChildren is changed to work in parallel;
      # currently it doesn't since parallel invocations of
      # FindUnusedMinor will conflict
-      self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node]
+      self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node_uuid]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    else:
      self.needed_locks[locking.LEVEL_NODE] = []
@@ -1587 +1605 @@
      self.needed_locks[locking.LEVEL_NODE_RES] = []

    self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
-                                   self.op.iallocator, self.op.remote_node,
+                                   self.op.iallocator, self.op.remote_node_uuid,
                                   self.op.disks, self.op.early_release,
                                   self.op.ignore_ipolicy)

@@ -1595 +1613 @@

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
-      assert self.op.remote_node is None
+      assert self.op.remote_node_uuid is None
      assert self.op.iallocator is not None
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

@@ -1607 +1625 @@

    elif level == locking.LEVEL_NODE:
      if self.op.iallocator is not None:
-        assert self.op.remote_node is None
+        assert self.op.remote_node_uuid is None
        assert not self.needed_locks[locking.LEVEL_NODE]
        assert locking.NAL in self.owned_locks(locking.LEVEL_NODE_ALLOC)

        # Lock member nodes of all locked groups
        self.needed_locks[locking.LEVEL_NODE] = \
-          [node_name
+          [node_uuid
           for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
-           for node_name in self.cfg.GetNodeGroup(group_uuid).members]
+           for node_uuid in self.cfg.GetNodeGroup(group_uuid).members]
      else:
        assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

@@ -1636 +1654 @@
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
-      "OLD_SECONDARY": instance.secondary_nodes[0],
+      "OLD_SECONDARY": self.cfg.GetNodeName(instance.secondary_nodes[0]),
      }
    env.update(BuildInstanceHookEnvByObject(self, instance))
    return env
@@ -1650 +1668 @@
      self.cfg.GetMasterNode(),
      instance.primary_node,
      ]
-    if self.op.remote_node is not None:
-      nl.append(self.op.remote_node)
+    if self.op.remote_node_uuid is not None:
+      nl.append(self.op.remote_node_uuid)
    return nl, nl

  def CheckPrereq(self):
@@ -1749 +1767 @@
    _SafeShutdownInstanceDisks(self, instance)


-def _CheckDiskConsistencyInner(lu, instance, dev, node, on_primary,
+def _CheckDiskConsistencyInner(lu, instance, dev, node_uuid, on_primary,
                               ldisk=False):
  """Check that mirrors are not degraded.

@@ -1760 +1778 @@
  the device(s)) to the ldisk (representing the local storage status).

  """
-  lu.cfg.SetDiskID(dev, node)
+  lu.cfg.SetDiskID(dev, node_uuid)

  result = True

  if on_primary or dev.AssembleOnSecondary():
-    rstats = lu.rpc.call_blockdev_find(node, dev)
+    rstats = lu.rpc.call_blockdev_find(node_uuid, dev)
    msg = rstats.fail_msg
    if msg:
-      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
+      lu.LogWarning("Can't find disk on node %s: %s",
+                    lu.cfg.GetNodeName(node_uuid), msg)
      result = False
    elif not rstats.payload:
-      lu.LogWarning("Can't find disk on node %s", node)
+      lu.LogWarning("Can't find disk on node %s", lu.cfg.GetNodeName(node_uuid))
      result = False
    else:
      if ldisk:
@@ -1781 +1800 @@

  if dev.children:
    for child in dev.children:
-      result = result and _CheckDiskConsistencyInner(lu, instance, child, node,
-                                                      on_primary)
+      result = result and _CheckDiskConsistencyInner(lu, instance, child,
+                                                     node_uuid, on_primary)

  return result


-def CheckDiskConsistency(lu, instance, dev, node, on_primary, ldisk=False):
+def CheckDiskConsistency(lu, instance, dev, node_uuid, on_primary, ldisk=False):
  """Wrapper around L{_CheckDiskConsistencyInner}.

  """
  (disk,) = AnnotateDiskParams(instance, [dev], lu.cfg)
-  return _CheckDiskConsistencyInner(lu, instance, disk, node, on_primary,
+  return _CheckDiskConsistencyInner(lu, instance, disk, node_uuid, on_primary,
                                    ldisk=ldisk)


-def _BlockdevFind(lu, node, dev, instance):
+def _BlockdevFind(lu, node_uuid, dev, instance):
  """Wrapper around call_blockdev_find to annotate diskparams.

  @param lu: A reference to the lu object
-  @param node: The node to call out
+  @param node_uuid: The node to call out
  @param dev: The device to find
  @param instance: The instance object the device belongs to
  @returns The result of the rpc call

  """
  (disk,) = AnnotateDiskParams(instance, [dev], lu.cfg)
-  return lu.rpc.call_blockdev_find(node, disk)
+  return lu.rpc.call_blockdev_find(node_uuid, disk)


 def _GenerateUniqueNames(lu, exts):
@@ -1829 +1848 @@
  Note: Locking is not within the scope of this class.

  """
-  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
+  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node_uuid,
               disks, early_release, ignore_ipolicy):
    """Initializes this class.

@@ -1840 +1859 @@
    self.instance_name = instance_name
    self.mode = mode
    self.iallocator_name = iallocator_name
-    self.remote_node = remote_node
+    self.remote_node_uuid = remote_node_uuid
    self.disks = disks
    self.early_release = early_release
    self.ignore_ipolicy = ignore_ipolicy

    # Runtime data
    self.instance = None
-    self.new_node = None
-    self.target_node = None
-    self.other_node = None
+    self.new_node_uuid = None
+    self.target_node_uuid = None
+    self.other_node_uuid = None
    self.remote_node_info = None
    self.node_secondary_ip = None

  @staticmethod
-  def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
+  def _RunAllocator(lu, iallocator_name, instance_name,
+                    relocate_from_node_uuids):
    """Compute a new secondary node using an IAllocator.

    """
-    req = iallocator.IAReqRelocate(name=instance_name,
-                                   relocate_from=list(relocate_from))
+    req = iallocator.IAReqRelocate(
+          name=instance_name,
+          relocate_from_node_uuids=list(relocate_from_node_uuids))
    ial = iallocator.IAllocator(lu.cfg, lu.rpc, req)

    ial.Run(iallocator_name)
@@ -1870 +1891 @@
                                 errors.ECODE_NORES)

    remote_node_name = ial.result[0]
+    remote_node = lu.cfg.GetNodeInfoByName(remote_node_name)
+
+    if remote_node is None:
+      raise errors.OpPrereqError("Node %s not found in configuration" %
+                                 remote_node_name, errors.ECODE_NOENT)

    lu.LogInfo("Selected new secondary for instance '%s': %s",
               instance_name, remote_node_name)

-    return remote_node_name
+    return remote_node.uuid

-  def _FindFaultyDisks(self, node_name):
+  def _FindFaultyDisks(self, node_uuid):
    """Wrapper for L{FindFaultyInstanceDisks}.

    """
    return FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
-                                   node_name, True)
+                                   node_uuid, True)

  def _CheckDisksActivated(self, instance):
    """Checks if the instance disks are activated.
@@ -1890 +1916 @@
    @return: True if they are activated, False otherwise

    """
-    nodes = instance.all_nodes
+    node_uuids = instance.all_nodes

    for idx, dev in enumerate(instance.disks):
-      for node in nodes:
-        self.lu.LogInfo("Checking disk/%d on %s", idx, node)
-        self.cfg.SetDiskID(dev, node)
+      for node_uuid in node_uuids:
+        self.lu.LogInfo("Checking disk/%d on %s", idx,
+                        self.cfg.GetNodeName(node_uuid))
+        self.cfg.SetDiskID(dev, node_uuid)

-        result = _BlockdevFind(self, node, dev, instance)
+        result = _BlockdevFind(self, node_uuid, dev, instance)

        if result.offline:
          continue
@@ -1927 +1954 @@
                                 errors.ECODE_FAULT)

    instance = self.instance
-    secondary_node = instance.secondary_nodes[0]
+    secondary_node_uuid = instance.secondary_nodes[0]

    if self.iallocator_name is None:
-      remote_node = self.remote_node
+      remote_node_uuid = self.remote_node_uuid
    else:
-      remote_node = self._RunAllocator(self.lu, self.iallocator_name,
-                                       instance.name, instance.secondary_nodes)
+      remote_node_uuid = self._RunAllocator(self.lu, self.iallocator_name,
+                                            instance.name,
+                                            instance.secondary_nodes)

-    if remote_node is None:
+    if remote_node_uuid is None:
      self.remote_node_info = None
    else:
-      assert remote_node in self.lu.owned_locks(locking.LEVEL_NODE), \
-             "Remote node '%s' is not locked" % remote_node
+      assert remote_node_uuid in self.lu.owned_locks(locking.LEVEL_NODE), \
+             "Remote node '%s' is not locked" % remote_node_uuid

-      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
+      self.remote_node_info = self.cfg.GetNodeInfo(remote_node_uuid)
      assert self.remote_node_info is not None, \
-        "Cannot retrieve locked node %s" % remote_node
+        "Cannot retrieve locked node %s" % remote_node_uuid

-    if remote_node == self.instance.primary_node:
+    if remote_node_uuid == self.instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance", errors.ECODE_INVAL)

-    if remote_node == secondary_node:
+    if remote_node_uuid == secondary_node_uuid:
      raise errors.OpPrereqError("The specified node is already the"
                                 " secondary node of the instance",
                                 errors.ECODE_INVAL)
@@ -1965 +1993 @@
                                   " first" % self.instance_name,
                                   errors.ECODE_STATE)
      faulty_primary = self._FindFaultyDisks(instance.primary_node)
-      faulty_secondary = self._FindFaultyDisks(secondary_node)
+      faulty_secondary = self._FindFaultyDisks(secondary_node_uuid)

      if faulty_primary and faulty_secondary:
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
@@ -1975 +2003 @@

      if faulty_primary:
        self.disks = faulty_primary
-        self.target_node = instance.primary_node
-        self.other_node = secondary_node
-        check_nodes = [self.target_node, self.other_node]
+        self.target_node_uuid = instance.primary_node
+        self.other_node_uuid = secondary_node_uuid
+        check_nodes = [self.target_node_uuid, self.other_node_uuid]
      elif faulty_secondary:
        self.disks = faulty_secondary
-        self.target_node = secondary_node
-        self.other_node = instance.primary_node
-        check_nodes = [self.target_node, self.other_node]
+        self.target_node_uuid = secondary_node_uuid
+        self.other_node_uuid = instance.primary_node
+        check_nodes = [self.target_node_uuid, self.other_node_uuid]
      else:
        self.disks = []
        check_nodes = []
@@ -1990 +2018 @@
    else:
      # Non-automatic modes
      if self.mode == constants.REPLACE_DISK_PRI:
-        self.target_node = instance.primary_node
-        self.other_node = secondary_node
-        check_nodes = [self.target_node, self.other_node]
+        self.target_node_uuid = instance.primary_node
+        self.other_node_uuid = secondary_node_uuid
+        check_nodes = [self.target_node_uuid, self.other_node_uuid]

      elif self.mode == constants.REPLACE_DISK_SEC:
-        self.target_node = secondary_node
-        self.other_node = instance.primary_node
-        check_nodes = [self.target_node, self.other_node]
+        self.target_node_uuid = secondary_node_uuid
+        self.other_node_uuid = instance.primary_node
+        check_nodes = [self.target_node_uuid, self.other_node_uuid]

      elif self.mode == constants.REPLACE_DISK_CHG:
-        self.new_node = remote_node
-        self.other_node = instance.primary_node
-        self.target_node = secondary_node
-        check_nodes = [self.new_node, self.other_node]
+        self.new_node_uuid = remote_node_uuid
+        self.other_node_uuid = instance.primary_node
+        self.target_node_uuid = secondary_node_uuid
+        check_nodes = [self.new_node_uuid, self.other_node_uuid]

-        CheckNodeNotDrained(self.lu, remote_node)
-        CheckNodeVmCapable(self.lu, remote_node)
+        CheckNodeNotDrained(self.lu, remote_node_uuid)
+        CheckNodeVmCapable(self.lu, remote_node_uuid)

-        old_node_info = self.cfg.GetNodeInfo(secondary_node)
+        old_node_info = self.cfg.GetNodeInfo(secondary_node_uuid)
        assert old_node_info is not None
        if old_node_info.offline and not self.early_release:
          # doesn't make sense to delay the release
          self.early_release = True
          self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
-                          " early-release mode", secondary_node)
+                          " early-release mode", secondary_node_uuid)

      else:
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
@@ -2035 +2063 @@
      CheckTargetNodeIPolicy(self, ipolicy, instance, self.remote_node_info,
                             self.cfg, ignore=self.ignore_ipolicy)

-    for node in check_nodes:
-      CheckNodeOnline(self.lu, node)
+    for node_uuid in check_nodes:
+      CheckNodeOnline(self.lu, node_uuid)

-    touched_nodes = frozenset(node_name for node_name in [self.new_node,
-                                                          self.other_node,
-                                                          self.target_node]
-                              if node_name is not None)
+    touched_nodes = frozenset(node_uuid for node_uuid in [self.new_node_uuid,
+                                                          self.other_node_uuid,
+                                                          self.target_node_uuid]
+                              if node_uuid is not None)

    # Release unneeded node and node resource locks
    ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
@@ -2056 +2084 @@
      instance.FindDisk(disk_idx)

    # Get secondary node IP addresses
-    self.node_secondary_ip = dict((name, node.secondary_ip) for (name, node)
+    self.node_secondary_ip = dict((uuid, node.secondary_ip) for (uuid, node)
                                  in self.cfg.GetMultiNodeInfo(touched_nodes))

  def Exec(self, feedback_fn):
@@ -2089 +2117 @@

    feedback_fn("Replacing disk(s) %s for instance '%s'" %
                (utils.CommaJoin(self.disks), self.instance.name))
-    feedback_fn("Current primary node: %s" % self.instance.primary_node)
+    feedback_fn("Current primary node: %s" %
+                self.cfg.GetNodeName(self.instance.primary_node))
    feedback_fn("Current seconary node: %s" %
-                utils.CommaJoin(self.instance.secondary_nodes))
+                utils.CommaJoin(self.cfg.GetNodeNames(
+                                  self.instance.secondary_nodes)))

    activate_disks = not self.instance.disks_active

@@ -2101 +2131 @@

    try:
      # Should we replace the secondary node?
-      if self.new_node is not None:
+      if self.new_node_uuid is not None:
        fn = self._ExecDrbd8Secondary
      else:
        fn = self._ExecDrbd8DiskOnly
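From here on the replace-disks tasklet keeps only UUIDs in its runtime state (new_node_uuid, target_node_uuid, other_node_uuid), so user-facing output has to translate them back. A short sketch of that translation, assuming a config object with the GetNodeName/GetNodeNames calls used in the hunks above; the helper itself is illustrative:

# Sketch only: turning UUID-based state back into readable output.
def _ReportNodes(cfg, instance, feedback_fn):
  # Single UUID -> single name, as done for the primary node above.
  feedback_fn("Current primary node: %s" %
              cfg.GetNodeName(instance.primary_node))
  # List of UUIDs -> list of names, as done for the secondary nodes above.
  feedback_fn("Current secondary nodes: %s" %
              ", ".join(cfg.GetNodeNames(instance.secondary_nodes)))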
2126 | 2156 |
|
2127 | 2157 |
return result |
2128 | 2158 |
|
2129 |
def _CheckVolumeGroup(self, nodes): |
|
2159 |
def _CheckVolumeGroup(self, node_uuids):
|
|
2130 | 2160 |
self.lu.LogInfo("Checking volume groups") |
2131 | 2161 |
|
2132 | 2162 |
vgname = self.cfg.GetVGName() |
2133 | 2163 |
|
2134 | 2164 |
# Make sure volume group exists on all involved nodes |
2135 |
results = self.rpc.call_vg_list(nodes) |
|
2165 |
results = self.rpc.call_vg_list(node_uuids)
|
|
2136 | 2166 |
if not results: |
2137 | 2167 |
raise errors.OpExecError("Can't list volume groups on the nodes") |
2138 | 2168 |
|
2139 |
for node in nodes:
|
|
2140 |
res = results[node] |
|
2141 |
res.Raise("Error checking node %s" % node)
|
|
2169 |
for node_uuid in node_uuids:
|
|
2170 |
res = results[node_uuid]
|
|
2171 |
res.Raise("Error checking node %s" % self.cfg.GetNodeName(node_uuid))
|
|
2142 | 2172 |
if vgname not in res.payload: |
2143 | 2173 |
raise errors.OpExecError("Volume group '%s' not found on node %s" % |
2144 |
(vgname, node))
|
|
2174 |
(vgname, self.cfg.GetNodeName(node_uuid)))
|
|
2145 | 2175 |
|
2146 |
def _CheckDisksExistence(self, nodes): |
|
2176 |
def _CheckDisksExistence(self, node_uuids):
|
|
2147 | 2177 |
# Check disk existence |
2148 | 2178 |
for idx, dev in enumerate(self.instance.disks): |
2149 | 2179 |
if idx not in self.disks: |
2150 | 2180 |
continue |
2151 | 2181 |
|
2152 |
for node in nodes: |
|
2153 |
self.lu.LogInfo("Checking disk/%d on %s", idx, node) |
|
2154 |
self.cfg.SetDiskID(dev, node) |
|
2182 |
for node_uuid in node_uuids: |
|
2183 |
self.lu.LogInfo("Checking disk/%d on %s", idx, |
|
2184 |
self.cfg.GetNodeName(node_uuid)) |
|
2185 |
self.cfg.SetDiskID(dev, node_uuid) |
|
2155 | 2186 |
|
2156 |
result = _BlockdevFind(self, node, dev, self.instance) |
|
2187 |
result = _BlockdevFind(self, node_uuid, dev, self.instance)
|
|
2157 | 2188 |
|
2158 | 2189 |
msg = result.fail_msg |
2159 | 2190 |
if msg or not result.payload: |
2160 | 2191 |
if not msg: |
2161 | 2192 |
msg = "disk not found" |
2162 | 2193 |
raise errors.OpExecError("Can't find disk/%d on node %s: %s" % |
2163 |
(idx, node, msg))
|
|
2194 |
(idx, self.cfg.GetNodeName(node_uuid), msg))
|
|
2164 | 2195 |
|
2165 |
def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
|
|
2196 |
def _CheckDisksConsistency(self, node_uuid, on_primary, ldisk):
|
|
2166 | 2197 |
for idx, dev in enumerate(self.instance.disks): |
2167 | 2198 |
if idx not in self.disks: |
2168 | 2199 |
continue |
2169 | 2200 |
|
2170 | 2201 |
self.lu.LogInfo("Checking disk/%d consistency on node %s" % |
2171 |
(idx, node_name))
|
|
2202 |
(idx, self.cfg.GetNodeName(node_uuid)))
|
|
2172 | 2203 |
|
2173 |
if not CheckDiskConsistency(self.lu, self.instance, dev, node_name,
|
|
2204 |
if not CheckDiskConsistency(self.lu, self.instance, dev, node_uuid,
|
|
2174 | 2205 |
on_primary, ldisk=ldisk): |
2175 | 2206 |
raise errors.OpExecError("Node %s has degraded storage, unsafe to" |
2176 | 2207 |
" replace disks for instance %s" % |
2177 |
(node_name, self.instance.name)) |
|
2208 |
(self.cfg.GetNodeName(node_uuid), |
|
2209 |
self.instance.name)) |
|
2178 | 2210 |
|
2179 |
def _CreateNewStorage(self, node_name):
|
|
2211 |
def _CreateNewStorage(self, node_uuid):
|
|
2180 | 2212 |
"""Create new storage on the primary or secondary node. |
2181 | 2213 |
|
2182 | 2214 |
This is only used for same-node replaces, not for changing the |
... | ... | |
2190 | 2222 |
if idx not in self.disks: |
2191 | 2223 |
continue |
2192 | 2224 |
|
2193 |
self.lu.LogInfo("Adding storage on %s for disk/%d", node_name, idx)
2225 |
self.lu.LogInfo("Adding storage on %s for disk/%d",
2226 |
self.cfg.GetNodeName(node_uuid), idx)
2194 | 2227 |
|
2195 |
self.cfg.SetDiskID(dev, node_name)
2228 |
self.cfg.SetDiskID(dev, node_uuid)
2196 | 2229 |
|
2197 | 2230 |
lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]] |
2198 | 2231 |
names = _GenerateUniqueNames(self.lu, lv_names) |
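
For a given disk index, the list comprehension above expands to one data and one meta suffix; a quick worked example for disk 0:

idx = 0
lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
# lv_names == [".disk0_data", ".disk0_meta"]
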
... | ... | |
2211 | 2244 |
new_lvs = [lv_data, lv_meta] |
2212 | 2245 |
old_lvs = [child.Copy() for child in dev.children] |
2213 | 2246 |
iv_names[dev.iv_name] = (dev, old_lvs, new_lvs) |
2214 |
excl_stor = IsExclusiveStorageEnabledNodeName(self.lu.cfg, node_name)
2247 |
excl_stor = IsExclusiveStorageEnabledNodeUuid(self.lu.cfg, node_uuid)
2215 | 2248 |
|
2216 | 2249 |
# we pass force_create=True to force the LVM creation |
2217 | 2250 |
for new_lv in new_lvs: |
2218 |
_CreateBlockDevInner(self.lu, node_name, self.instance, new_lv, True,
2251 |
_CreateBlockDevInner(self.lu, node_uuid, self.instance, new_lv, True,
2219 | 2252 |
GetInstanceInfoText(self.instance), False, |
2220 | 2253 |
excl_stor) |
2221 | 2254 |
|
2222 | 2255 |
return iv_names |
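
The iv_names mapping returned here associates each disk's iv_name with a (DRBD device, old LVs, new LVs) triple; later steps iterate over its values to rewire DRBD and over its items to verify and finally remove the old LVs. A hypothetical example of its shape (placeholder strings stand in for the real Disk objects):

iv_names = {
  "disk/0": ("drbd_dev0", ["old_data0", "old_meta0"], ["new_data0", "new_meta0"]),
  "disk/1": ("drbd_dev1", ["old_data1", "old_meta1"], ["new_data1", "new_meta1"]),
}
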
2223 | 2256 |
|
2224 |
def _CheckDevices(self, node_name, iv_names):
2257 |
def _CheckDevices(self, node_uuid, iv_names):
2225 | 2258 |
for name, (dev, _, _) in iv_names.iteritems(): |
2226 |
self.cfg.SetDiskID(dev, node_name)
2259 |
self.cfg.SetDiskID(dev, node_uuid)
2227 | 2260 |
|
2228 |
result = _BlockdevFind(self, node_name, dev, self.instance)
2261 |
result = _BlockdevFind(self, node_uuid, dev, self.instance)
2229 | 2262 |
|
2230 | 2263 |
msg = result.fail_msg |
2231 | 2264 |
if msg or not result.payload: |
... | ... | |
2237 | 2270 |
if result.payload.is_degraded: |
2238 | 2271 |
raise errors.OpExecError("DRBD device %s is degraded!" % name) |
2239 | 2272 |
|
2240 |
def _RemoveOldStorage(self, node_name, iv_names):
2273 |
def _RemoveOldStorage(self, node_uuid, iv_names):
2241 | 2274 |
for name, (_, old_lvs, _) in iv_names.iteritems(): |
2242 | 2275 |
self.lu.LogInfo("Remove logical volumes for %s", name) |
2243 | 2276 |
|
2244 | 2277 |
for lv in old_lvs: |
2245 |
self.cfg.SetDiskID(lv, node_name)
2278 |
self.cfg.SetDiskID(lv, node_uuid)
2246 | 2279 |
|
2247 |
msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
2280 |
msg = self.rpc.call_blockdev_remove(node_uuid, lv).fail_msg
2248 | 2281 |
if msg: |
2249 | 2282 |
self.lu.LogWarning("Can't remove old LV: %s", msg, |
2250 | 2283 |
hint="remove unused LVs manually") |
... | ... | |
2275 | 2308 |
|
2276 | 2309 |
# Step: check device activation |
2277 | 2310 |
self.lu.LogStep(1, steps_total, "Check device existence") |
2278 |
self._CheckDisksExistence([self.other_node, self.target_node])
2279 |
self._CheckVolumeGroup([self.target_node, self.other_node])
2311 |
self._CheckDisksExistence([self.other_node_uuid, self.target_node_uuid])
2312 |
self._CheckVolumeGroup([self.target_node_uuid, self.other_node_uuid])
2280 | 2313 |
|
2281 | 2314 |
# Step: check other node consistency |
2282 | 2315 |
self.lu.LogStep(2, steps_total, "Check peer consistency") |
2283 |
self._CheckDisksConsistency(self.other_node,
2284 |
self.other_node == self.instance.primary_node,
2285 |
False)
2316 |
self._CheckDisksConsistency(
2317 |
self.other_node_uuid, self.other_node_uuid == self.instance.primary_node,
2318 |
False)
2286 | 2319 |
|
2287 | 2320 |
# Step: create new storage |
2288 | 2321 |
self.lu.LogStep(3, steps_total, "Allocate new storage") |
2289 |
iv_names = self._CreateNewStorage(self.target_node)
2322 |
iv_names = self._CreateNewStorage(self.target_node_uuid)
2290 | 2323 |
|
2291 | 2324 |
# Step: for each lv, detach+rename*2+attach |
2292 | 2325 |
self.lu.LogStep(4, steps_total, "Changing drbd configuration") |
2293 | 2326 |
for dev, old_lvs, new_lvs in iv_names.itervalues(): |
2294 | 2327 |
self.lu.LogInfo("Detaching %s drbd from local storage", dev.iv_name) |
2295 | 2328 |
|
2296 |
result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
2329 |
result = self.rpc.call_blockdev_removechildren(self.target_node_uuid, dev,
2297 | 2330 |
old_lvs) |
2298 | 2331 |
result.Raise("Can't detach drbd from local storage on node" |
2299 |
" %s for device %s" % (self.target_node, dev.iv_name))
2332 |
" %s for device %s" %
2333 |
(self.cfg.GetNodeName(self.target_node_uuid), dev.iv_name))
2300 | 2334 |
#dev.children = [] |
2301 | 2335 |
#cfg.Update(instance) |
2302 | 2336 |
|
... | ... | |
2314 | 2348 |
# Build the rename list based on what LVs exist on the node |
2315 | 2349 |
rename_old_to_new = [] |
2316 | 2350 |
for to_ren in old_lvs: |
2317 |
result = self.rpc.call_blockdev_find(self.target_node, to_ren)
2351 |
result = self.rpc.call_blockdev_find(self.target_node_uuid, to_ren)
2318 | 2352 |
if not result.fail_msg and result.payload: |
2319 | 2353 |
# device exists |
2320 | 2354 |
rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix))) |
2321 | 2355 |
|
2322 | 2356 |
self.lu.LogInfo("Renaming the old LVs on the target node") |
2323 |
result = self.rpc.call_blockdev_rename(self.target_node,
2357 |
result = self.rpc.call_blockdev_rename(self.target_node_uuid,
2324 | 2358 |
rename_old_to_new) |
2325 |
result.Raise("Can't rename old LVs on node %s" % self.target_node)
2359 |
result.Raise("Can't rename old LVs on node %s" %
2360 |
self.cfg.GetNodeName(self.target_node_uuid))
2326 | 2361 |
|
2327 | 2362 |
# Now we rename the new LVs to the old LVs |
2328 | 2363 |
self.lu.LogInfo("Renaming the new LVs on the target node") |
2329 | 2364 |
rename_new_to_old = [(new, old.physical_id) |
2330 | 2365 |
for old, new in zip(old_lvs, new_lvs)] |
2331 |
result = self.rpc.call_blockdev_rename(self.target_node,
2366 |
result = self.rpc.call_blockdev_rename(self.target_node_uuid,
2332 | 2367 |
rename_new_to_old) |
2333 |
result.Raise("Can't rename new LVs on node %s" % self.target_node)
2368 |
result.Raise("Can't rename new LVs on node %s" %
2369 |
self.cfg.GetNodeName(self.target_node_uuid))
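
The double rename in step 4 can be summarized as: move the old LVs out of the way, then give the new LVs the names the old ones had, so the DRBD device re-attaches to the expected paths. A simplified sketch with plain strings (the real code issues call_blockdev_rename with Disk objects and keeps the pairs in rename_old_to_new / rename_new_to_old):

def swap_lv_names(old_lvs, new_lvs, temp_suffix="_replaced"):
  # First move the old LVs aside by appending a temporary suffix ...
  renames = [(lv, lv + temp_suffix) for lv in old_lvs]
  # ... then give the new LVs the names the old ones had.
  renames += [(new, old) for old, new in zip(old_lvs, new_lvs)]
  return renames

# swap_lv_names(["disk0_data"], ["disk0_data_new"]) ==
#   [("disk0_data", "disk0_data_replaced"), ("disk0_data_new", "disk0_data")]
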
2334 | 2370 |
|
2335 | 2371 |
# Intermediate steps of in memory modifications |
2336 | 2372 |
for old, new in zip(old_lvs, new_lvs): |
2337 | 2373 |
new.logical_id = old.logical_id |
2338 |
self.cfg.SetDiskID(new, self.target_node)
2374 |
self.cfg.SetDiskID(new, self.target_node_uuid)
2339 | 2375 |
|
2340 | 2376 |
# We need to modify old_lvs so that removal later removes the |
2341 | 2377 |
# right LVs, not the newly added ones; note that old_lvs is a |
2342 | 2378 |
# copy here |
2343 | 2379 |
for disk in old_lvs: |
2344 | 2380 |
disk.logical_id = ren_fn(disk, temp_suffix) |
2345 |
self.cfg.SetDiskID(disk, self.target_node)
2381 |
self.cfg.SetDiskID(disk, self.target_node_uuid)
2346 | 2382 |
|
2347 | 2383 |
# Now that the new lvs have the old name, we can add them to the device |
2348 |
self.lu.LogInfo("Adding new mirror component on %s", self.target_node)
2349 |
result = self.rpc.call_blockdev_addchildren(self.target_node,
2384 |
self.lu.LogInfo("Adding new mirror component on %s",
2385 |
self.cfg.GetNodeName(self.target_node_uuid))
2386 |
result = self.rpc.call_blockdev_addchildren(self.target_node_uuid,
2350 | 2387 |
(dev, self.instance), new_lvs) |
2351 | 2388 |
msg = result.fail_msg |
2352 | 2389 |
if msg: |
2353 | 2390 |
for new_lv in new_lvs: |
2354 |
msg2 = self.rpc.call_blockdev_remove(self.target_node,
2391 |
msg2 = self.rpc.call_blockdev_remove(self.target_node_uuid,
2355 | 2392 |
new_lv).fail_msg |
2356 | 2393 |
if msg2: |
2357 | 2394 |
self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2, |
... | ... | |
2363 | 2400 |
|
2364 | 2401 |
if self.early_release: |
2365 | 2402 |
self.lu.LogStep(cstep.next(), steps_total, "Removing old storage") |
2366 |
self._RemoveOldStorage(self.target_node, iv_names)
2403 |
self._RemoveOldStorage(self.target_node_uuid, iv_names)
2367 | 2404 |
# TODO: Check if releasing locks early still makes sense |
2368 | 2405 |
ReleaseLocks(self.lu, locking.LEVEL_NODE_RES) |
2369 | 2406 |
else: |
... | ... | |
2389 | 2426 |
# Step: remove old storage |
2390 | 2427 |
if not self.early_release: |
2391 | 2428 |
self.lu.LogStep(cstep.next(), steps_total, "Removing old storage") |
2392 |
self._RemoveOldStorage(self.target_node, iv_names)
2429 |
self._RemoveOldStorage(self.target_node_uuid, iv_names)
2393 | 2430 |
|
2394 | 2431 |
def _ExecDrbd8Secondary(self, feedback_fn): |
2395 | 2432 |
"""Replace the secondary node for DRBD 8. |
... | ... | |
2426 | 2463 |
# Step: create new storage |
2427 | 2464 |
self.lu.LogStep(3, steps_total, "Allocate new storage") |
2428 | 2465 |
disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg) |
2429 |
excl_stor = IsExclusiveStorageEnabledNodeName(self.lu.cfg, self.new_node)
2466 |
excl_stor = IsExclusiveStorageEnabledNodeUuid(self.lu.cfg,
2467 |
self.new_node_uuid)
2430 | 2468 |
for idx, dev in enumerate(disks): |
2431 | 2469 |
self.lu.LogInfo("Adding new local storage on %s for disk/%d" % |
2432 |
(self.new_node, idx))
2470 |
(self.cfg.GetNodeName(self.new_node_uuid), idx))
2433 | 2471 |
# we pass force_create=True to force LVM creation |
2434 | 2472 |
for new_lv in dev.children: |
2435 |
_CreateBlockDevInner(self.lu, self.new_node, self.instance, new_lv,
2473 |
_CreateBlockDevInner(self.lu, self.new_node_uuid, self.instance, new_lv,
2436 | 2474 |
True, GetInstanceInfoText(self.instance), False, |
2437 | 2475 |
excl_stor) |
2438 | 2476 |
|
... | ... | |
2440 | 2478 |
# after this, we must manually remove the drbd minors on both the |
2441 | 2479 |
# error and the success paths |
2442 | 2480 |
self.lu.LogStep(4, steps_total, "Changing drbd configuration") |
2443 |
minors = self.cfg.AllocateDRBDMinor([self.new_node
2444 |
for dev in self.instance.disks],
2481 |
minors = self.cfg.AllocateDRBDMinor([self.new_node_uuid
2482 |
for _ in self.instance.disks],
2445 | 2483 |
self.instance.name) |
2446 | 2484 |
logging.debug("Allocated minors %r", minors) |
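
AllocateDRBDMinor is handed one copy of the new node's UUID per instance disk, so one minor is reserved on that node for every disk; a small illustration with hypothetical values:

new_node_uuid = "d1a6c0de-0000-0000-0000-000000000000"  # hypothetical
instance_disks = ["disk0", "disk1"]                      # two disks
nodes = [new_node_uuid for _ in instance_disks]
# nodes == [new_node_uuid, new_node_uuid]; one free minor number is
# returned per entry, e.g. [3, 4].
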
2447 | 2485 |
|
2448 | 2486 |
iv_names = {} |
2449 | 2487 |
for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)): |
2450 | 2488 |
self.lu.LogInfo("activating a new drbd on %s for disk/%d" % |
2451 |
(self.new_node, idx))
2489 |
(self.cfg.GetNodeName(self.new_node_uuid), idx))
2452 | 2490 |
# create new devices on new_node; note that we create two IDs: |
2453 | 2491 |
# one without port, so the drbd will be activated without |
2454 | 2492 |
# networking information on the new node at this stage, and one |
... | ... | |
2460 | 2498 |
assert self.instance.primary_node == o_node2, "Three-node instance?" |
2461 | 2499 |
p_minor = o_minor2 |
2462 | 2500 |
|
2463 |
new_alone_id = (self.instance.primary_node, self.new_node, None,
2501 |
new_alone_id = (self.instance.primary_node, self.new_node_uuid, None,
2464 | 2502 |
p_minor, new_minor, o_secret) |
2465 |
new_net_id = (self.instance.primary_node, self.new_node, o_port,
2503 |
new_net_id = (self.instance.primary_node, self.new_node_uuid, o_port,
2466 | 2504 |
p_minor, new_minor, o_secret) |
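
The two logical IDs built above differ only in the third (port) field: the stand-alone ID has no port, so the new DRBD device can be brought up without networking first, while the networked ID carries the original port for the later full activation. A sketch with hypothetical values (the real ones come from the existing disk's logical_id):

primary_uuid = "aaaa0000-0000-0000-0000-000000000000"  # hypothetical
new_uuid = "bbbb0000-0000-0000-0000-000000000000"      # hypothetical
o_port, p_minor, new_minor, o_secret = 11000, 0, 3, "secret"

# Stand-alone: no port, device is activated without networking information.
new_alone_id = (primary_uuid, new_uuid, None, p_minor, new_minor, o_secret)
# Networked: same tuple with the DRBD port set, used for later activation.
new_net_id = (primary_uuid, new_uuid, o_port, p_minor, new_minor, o_secret)
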