Revision 5eacbcae lib/cmdlib/instance_storage.py
b/lib/cmdlib/instance_storage.py

38   | 38   | from ganeti import rpc
39   | 39   | from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, Tasklet
40   | 40   | from ganeti.cmdlib.common import INSTANCE_DOWN, INSTANCE_NOT_RUNNING, \
41   |      | -   _AnnotateDiskParams, _CheckIAllocatorOrNode, _ExpandNodeName, \
42   |      | -   _CheckNodeOnline, _CheckInstanceNodeGroups, _CheckInstanceState, \
43   |      | -   _IsExclusiveStorageEnabledNode, _FindFaultyInstanceDisks
44   |      | - from ganeti.cmdlib.instance_utils import _GetInstanceInfoText, \
45   |      | -   _CopyLockList, _ReleaseLocks, _CheckNodeVmCapable, \
46   |      | -   _BuildInstanceHookEnvByObject, _CheckNodeNotDrained, _CheckTargetNodeIPolicy
     | 41   | +   AnnotateDiskParams, CheckIAllocatorOrNode, ExpandNodeName, \
     | 42   | +   CheckNodeOnline, CheckInstanceNodeGroups, CheckInstanceState, \
     | 43   | +   IsExclusiveStorageEnabledNode, FindFaultyInstanceDisks
     | 44   | + from ganeti.cmdlib.instance_utils import GetInstanceInfoText, \
     | 45   | +   CopyLockList, ReleaseLocks, CheckNodeVmCapable, \
     | 46   | +   BuildInstanceHookEnvByObject, CheckNodeNotDrained, CheckTargetNodeIPolicy
47   | 47   |
48   | 48   | import ganeti.masterd.instance
49   | 49   |
...  | ...  |
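The whole revision follows one pattern: helpers that sibling cmdlib modules need are promoted to the public API of ganeti.cmdlib.common and ganeti.cmdlib.instance_utils by dropping their leading underscore, and every call site in this file is updated to match. A minimal caller-side sketch of the change, plus an illustrative way to apply such a rename mechanically (the rename script below is not part of this revision; it only demonstrates the pattern):

# Before this revision: sibling cmdlib modules imported "private" names.
#   from ganeti.cmdlib.common import _CheckNodeOnline, _ExpandNodeName
# After this revision: the same helpers under their public names.
#   from ganeti.cmdlib.common import CheckNodeOnline, ExpandNodeName

import re

RENAMED = ["AnnotateDiskParams", "CheckIAllocatorOrNode", "ExpandNodeName",
           "CheckNodeOnline", "CheckInstanceNodeGroups", "CheckInstanceState",
           "IsExclusiveStorageEnabledNode", "FindFaultyInstanceDisks"]

def strip_private_prefix(source):
  """Rewrite _Name(...) references to Name(...) for the renamed helpers."""
  pattern = re.compile(r"\b_(%s)\b" % "|".join(RENAMED))
  return pattern.sub(r"\1", source)

print(strip_private_prefix("ni = _ExpandNodeName(self.cfg, name)"))
# -> ni = ExpandNodeName(self.cfg, name)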
65   | 65   |   }
66   | 66   |
67   | 67   |
68   |      | - def _CreateSingleBlockDev(lu, node, instance, device, info, force_open,
69   |      | -                           excl_stor):
     | 68   | + def CreateSingleBlockDev(lu, node, instance, device, info, force_open,
     | 69   | +                          excl_stor):
70   | 70   |   """Create a single block device on a given node.
71   | 71   |
72   | 72   |   This will not recurse over children of the device, so they must be
...  | ...  |
146  | 146  |     if not force_create:
147  | 147  |       return created_devices
148  | 148  |
149  |      | -     _CreateSingleBlockDev(lu, node, instance, device, info, force_open,
150  |      | -                           excl_stor)
     | 149  | +     CreateSingleBlockDev(lu, node, instance, device, info, force_open,
     | 150  | +                          excl_stor)
151  | 151  |     # The device has been completely created, so there is no point in keeping
152  | 152  |     # its subdevices in the list. We just add the device itself instead.
153  | 153  |     created_devices = [(node, device)]
...  | ...  |
160  | 160  |     raise errors.DeviceCreationError(str(e), created_devices)
161  | 161  |
162  | 162  |
163  |      | - def _IsExclusiveStorageEnabledNodeName(cfg, nodename):
     | 163  | + def IsExclusiveStorageEnabledNodeName(cfg, nodename):
164  | 164  |   """Whether exclusive_storage is in effect for the given node.
165  | 165  |
166  | 166  |   @type cfg: L{config.ConfigWriter}
...  | ...  |
176  | 176  |   if ni is None:
177  | 177  |     raise errors.OpPrereqError("Invalid node name %s" % nodename,
178  | 178  |                                errors.ECODE_NOENT)
179  |      | -   return _IsExclusiveStorageEnabledNode(cfg, ni)
     | 179  | +   return IsExclusiveStorageEnabledNode(cfg, ni)
180  | 180  |
181  | 181  |
182  |      | - def _CreateBlockDev(lu, node, instance, device, force_create, info,
     | 182  | + def CreateBlockDev(lu, node, instance, device, force_create, info,
183  | 183  |                     force_open):
184  | 184  |   """Wrapper around L{_CreateBlockDevInner}.
185  | 185  |
186  | 186  |   This method annotates the root device first.
187  | 187  |
188  | 188  |   """
189  |      | -   (disk,) = _AnnotateDiskParams(instance, [device], lu.cfg)
190  |      | -   excl_stor = _IsExclusiveStorageEnabledNodeName(lu.cfg, node)
     | 189  | +   (disk,) = AnnotateDiskParams(instance, [device], lu.cfg)
     | 190  | +   excl_stor = IsExclusiveStorageEnabledNodeName(lu.cfg, node)
191  | 191  |   return _CreateBlockDevInner(lu, node, instance, disk, force_create, info,
192  | 192  |                               force_open, excl_stor)
193  | 193  |
194  | 194  |
195  |      | - def _CreateDisks(lu, instance, to_skip=None, target_node=None):
     | 195  | + def CreateDisks(lu, instance, to_skip=None, target_node=None):
196  | 196  |   """Create all disks for an instance.
197  | 197  |
198  | 198  |   This abstracts away some work from AddInstance.
...  | ...  |
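Several of the renamed helpers are thin lookup-and-delegate wrappers. Pieced together from the fragments above, IsExclusiveStorageEnabledNodeName resolves a node name to its node object and defers to IsExclusiveStorageEnabledNode. A rough sketch under that reading; only the lines shown in the hunks are certain, and the cfg.GetNodeInfo lookup is assumed:

from ganeti import errors
from ganeti.cmdlib.common import IsExclusiveStorageEnabledNode

def IsExclusiveStorageEnabledNodeName(cfg, nodename):
  """Whether exclusive_storage is in effect for the given node (sketch)."""
  ni = cfg.GetNodeInfo(nodename)  # assumed: resolve the name to a node object
  if ni is None:
    raise errors.OpPrereqError("Invalid node name %s" % nodename,
                               errors.ECODE_NOENT)
  return IsExclusiveStorageEnabledNode(cfg, ni)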
209  | 209  |   @return: the success of the creation
210  | 210  |
211  | 211  |   """
212  |      | -   info = _GetInstanceInfoText(instance)
     | 212  | +   info = GetInstanceInfoText(instance)
213  | 213  |   if target_node is None:
214  | 214  |     pnode = instance.primary_node
215  | 215  |     all_nodes = instance.all_nodes
...  | ...  |
235  | 235  |     for node in all_nodes:
236  | 236  |       f_create = node == pnode
237  | 237  |       try:
238  |      | -         _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
     | 238  | +         CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
239  | 239  |         disks_created.append((node, device))
240  | 240  |       except errors.OpExecError:
241  | 241  |         logging.warning("Creating disk %s for instance '%s' failed",
...  | ...  |
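CreateDisks keeps a running list of the devices it actually managed to create, so that a failure can be undone instead of leaving half-created storage behind; the visible fragments show the failure being logged (line 241), a DeviceCreationError carrying the created-devices list at the lower layer (line 160), and an OpExecError re-raised to the caller (line 253). A simplified, standalone sketch of that create-then-roll-back pattern (the function and callback names here are illustrative, not the exact Ganeti code):

class OpExecError(Exception):
  """Stand-in for ganeti.errors.OpExecError in this sketch."""

def create_disks(nodes, devices, create_fn, remove_fn):
  """Create every device on every node, undoing partial work on failure."""
  disks_created = []          # (node, device) pairs that really exist
  try:
    for device in devices:
      for node in nodes:
        create_fn(node, device)
        disks_created.append((node, device))
  except OpExecError:
    # Roll back whatever was already created, then re-raise for the caller.
    for node, device in disks_created:
      remove_fn(node, device)
    raise
  return disks_created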
253  | 253  |       raise errors.OpExecError(e.message)
254  | 254  |
255  | 255  |
256  |      | - def _ComputeDiskSizePerVG(disk_template, disks):
     | 256  | + def ComputeDiskSizePerVG(disk_template, disks):
257  | 257  |   """Compute disk size requirements in the volume group
258  | 258  |
259  | 259  |   """
...  | ...  |
285  | 285  |   return req_size_dict[disk_template]
286  | 286  |
287  | 287  |
288  |      | - def _ComputeDisks(op, default_vg):
     | 288  | + def ComputeDisks(op, default_vg):
289  | 289  |   """Computes the instance disks.
290  | 290  |
291  | 291  |   @param op: The instance opcode
...  | ...  |
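ComputeDiskSizePerVG answers, per volume group, how much space a given disk layout needs, keyed by disk template; the hunk only shows the final req_size_dict lookup. A standalone sketch of that kind of aggregation with simplified inputs (plain dicts instead of Ganeti opcode objects; the per-disk DRBD metadata overhead value is illustrative):

def compute_disk_size_per_vg(disk_template, disks, default_vg="xenvg"):
  """Sum requested disk sizes (MiB) per volume group, per disk template."""
  def _sum_per_vg(extra_per_disk=0):
    sizes = {}
    for disk in disks:
      vg = disk.get("vg", default_vg)
      sizes[vg] = sizes.get(vg, 0) + disk["size"] + extra_per_disk
    return sizes

  req_size_dict = {
    "diskless": {},
    "plain": _sum_per_vg(),
    # DRBD needs extra space per disk for its metadata (illustrative value)
    "drbd8": _sum_per_vg(extra_per_disk=128),
  }
  if disk_template not in req_size_dict:
    raise ValueError("Disk template '%s' size requirement"
                     " unknown" % disk_template)
  return req_size_dict[disk_template]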
349  | 349  |   return disks
350  | 350  |
351  | 351  |
352  |      | - def _CheckRADOSFreeSpace():
     | 352  | + def CheckRADOSFreeSpace():
353  | 353  |   """Compute disk size requirements inside the RADOS cluster.
354  | 354  |
355  | 355  |   """
...  | ...  |
385  | 385  |   return drbd_dev
386  | 386  |
387  | 387  |
388  |      | - def _GenerateDiskTemplate(
     | 388  | + def GenerateDiskTemplate(
389  | 389  |     lu, template_name, instance_name, primary_node, secondary_nodes,
390  | 390  |     disk_info, file_storage_dir, file_driver, base_index,
391  | 391  |     feedback_fn, full_disk_params, _req_file_storage=opcodes.RequireFileStorage,
...  | ...  |
591  | 591  |     # We don't want _CheckIAllocatorOrNode selecting the default iallocator
592  | 592  |     # when neither iallocator nor nodes are specified
593  | 593  |     if self.op.iallocator or self.op.nodes:
594  |      | -       _CheckIAllocatorOrNode(self, "iallocator", "nodes")
     | 594  | +       CheckIAllocatorOrNode(self, "iallocator", "nodes")
595  | 595  |
596  | 596  |     for (idx, params) in self.op.disks:
597  | 597  |       utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)
...  | ...  |
607  | 607  |       self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
608  | 608  |
609  | 609  |     if self.op.nodes:
610  |      | -       self.op.nodes = [_ExpandNodeName(self.cfg, n) for n in self.op.nodes]
     | 610  | +       self.op.nodes = [ExpandNodeName(self.cfg, n) for n in self.op.nodes]
611  | 611  |       self.needed_locks[locking.LEVEL_NODE] = list(self.op.nodes)
612  | 612  |     else:
613  | 613  |       self.needed_locks[locking.LEVEL_NODE] = []
...  | ...  |
652  | 652  |     elif level == locking.LEVEL_NODE_RES:
653  | 653  |       # Copy node locks
654  | 654  |       self.needed_locks[locking.LEVEL_NODE_RES] = \
655  |      | -         _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
     | 655  | +         CopyLockList(self.needed_locks[locking.LEVEL_NODE])
656  | 656  |
657  | 657  |   def BuildHooksEnv(self):
658  | 658  |     """Build hooks env.
...  | ...  |
660  | 660  |     This runs on master, primary and secondary nodes of the instance.
661  | 661  |
662  | 662  |     """
663  |      | -     return _BuildInstanceHookEnvByObject(self, self.instance)
     | 663  | +     return BuildInstanceHookEnvByObject(self, self.instance)
664  | 664  |
665  | 665  |   def BuildHooksNodes(self):
666  | 666  |     """Build hooks nodes.
...  | ...  |
693  | 693  |     else:
694  | 694  |       primary_node = instance.primary_node
695  | 695  |       if not self.op.iallocator:
696  |      | -         _CheckNodeOnline(self, primary_node)
     | 696  | +         CheckNodeOnline(self, primary_node)
697  | 697  |
698  | 698  |     if instance.disk_template == constants.DT_DISKLESS:
699  | 699  |       raise errors.OpPrereqError("Instance '%s' has no disks" %
...  | ...  |
704  | 704  |     if owned_groups:
705  | 705  |       # Node group locks are acquired only for the primary node (and only
706  | 706  |       # when the allocator is used)
707  |      | -       _CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups,
708  |      | -                                primary_only=True)
     | 707  | +       CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups,
     | 708  | +                               primary_only=True)
709  | 709  |
710  | 710  |     # if we replace nodes *and* the old primary is offline, we don't
711  | 711  |     # check the instance state
712  | 712  |     old_pnode = self.cfg.GetNodeInfo(instance.primary_node)
713  | 713  |     if not ((self.op.iallocator or self.op.nodes) and old_pnode.offline):
714  |      | -       _CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
715  |      | -                           msg="cannot recreate disks")
     | 714  | +       CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
     | 715  | +                          msg="cannot recreate disks")
716  | 716  |
717  | 717  |     if self.op.disks:
718  | 718  |       self.disks = dict(self.op.disks)
...  | ...  |
735  | 735  |     if self.op.iallocator:
736  | 736  |       self._RunAllocator()
737  | 737  |       # Release unneeded node and node resource locks
738  |      | -       _ReleaseLocks(self, locking.LEVEL_NODE, keep=self.op.nodes)
739  |      | -       _ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=self.op.nodes)
740  |      | -       _ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
     | 738  | +       ReleaseLocks(self, locking.LEVEL_NODE, keep=self.op.nodes)
     | 739  | +       ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=self.op.nodes)
     | 740  | +       ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
741  | 741  |
742  | 742  |     assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
743  | 743  |
...  | ...  |
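Once the allocator has picked the nodes, the LU immediately drops every node and node-resource lock it no longer needs, keeping only the chosen ones (keep=self.op.nodes). A simplified, standalone sketch of that keep-list release idea (ReleaseLocks itself lives in ganeti.cmdlib.instance_utils; this is not its actual code):

def release_locks(owned, keep=None):
  """Split currently held lock names into (to release, to keep holding)."""
  keep = frozenset(keep or [])
  release = [name for name in owned if name not in keep]
  still_held = [name for name in owned if name in keep]
  return release, still_held

# Example: after the allocator chose node2/node3, node1's lock can go.
release, held = release_locks(["node1", "node2", "node3"],
                              keep=["node2", "node3"])
assert release == ["node1"] and held == ["node2", "node3"]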
800  | 800  |     # All touched nodes must be locked
801  | 801  |     mylocks = self.owned_locks(locking.LEVEL_NODE)
802  | 802  |     assert mylocks.issuperset(frozenset(instance.all_nodes))
803  |      | -     _CreateDisks(self, instance, to_skip=to_skip)
     | 803  | +     CreateDisks(self, instance, to_skip=to_skip)
804  | 804  |
805  | 805  |
806  | 806  | def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
...  | ...  |
842  | 842  |                                errors.ECODE_NORES)
843  | 843  |
844  | 844  |
845  |      | - def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
     | 845  | + def CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
846  | 846  |   """Checks if nodes have enough free disk space in all the VGs.
847  | 847  |
848  | 848  |   This function checks if all given nodes have the needed amount of
...  | ...  |
895  | 895  |   return (total_size - written) * avg_time
896  | 896  |
897  | 897  |
898  |      | - def _WipeDisks(lu, instance, disks=None):
     | 898  | + def WipeDisks(lu, instance, disks=None):
899  | 899  |   """Wipes instance disks.
900  | 900  |
901  | 901  |   @type lu: L{LogicalUnit}
...  | ...  |
990  | 990  |                     " failed", idx, instance.name)
991  | 991  |
992  | 992  |
993  |      | - def _ExpandCheckDisks(instance, disks):
     | 993  | + def ExpandCheckDisks(instance, disks):
994  | 994  |   """Return the instance disks selected by the disks list
995  | 995  |
996  | 996  |   @type disks: list of L{objects.Disk} or None
...  | ...  |
1008 | 1008 |   return disks
1009 | 1009 |
1010 | 1010 |
1011 |      | - def _WaitForSync(lu, instance, disks=None, oneshot=False):
     | 1011 | + def WaitForSync(lu, instance, disks=None, oneshot=False):
1012 | 1012 |   """Sleep and poll for an instance's disk to sync.
1013 | 1013 |
1014 | 1014 |   """
1015 | 1015 |   if not instance.disks or disks is not None and not disks:
1016 | 1016 |     return True
1017 | 1017 |
1018 |      | -   disks = _ExpandCheckDisks(instance, disks)
     | 1018 | +   disks = ExpandCheckDisks(instance, disks)
1019 | 1019 |
1020 | 1020 |   if not oneshot:
1021 | 1021 |     lu.LogInfo("Waiting for instance %s to sync disks", instance.name)
...  | ...  |
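WaitForSync blocks until the instance's disks report as synchronized and returns False if anything ends up degraded; callers further down (the grow and activate LUs) turn that into a warning or an OpExecError. A standalone sketch of the poll-until-synced idea (the interval, timeout and status shape are illustrative, not Ganeti's actual timing or RPC result format):

import time

def wait_for_sync(get_sync_status, poll_interval=5.0, timeout=600.0):
  """Poll get_sync_status() until every disk is in sync or time runs out.

  get_sync_status must return a list of (is_degraded, percent_done) tuples,
  one per disk.  Returns True when nothing is degraded, False otherwise.
  """
  deadline = time.time() + timeout
  while time.time() < deadline:
    status = get_sync_status()
    if all(done >= 100 for (_, done) in status):
      return not any(degraded for (degraded, _) in status)
    time.sleep(poll_interval)
  return False

# Caller-side pattern, mirroring line 1413 of the new file:
#   disk_abort = not WaitForSync(self, instance, disks=[disk])
#   if disk_abort:
#     self.LogWarning("Disk syncing has not returned a good status; ...")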
1084 | 1084 |   return not cumul_degraded
1085 | 1085 |
1086 | 1086 |
1087 |      | - def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
     | 1087 | + def ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
1088 | 1088 |   """Shutdown block devices of an instance.
1089 | 1089 |
1090 | 1090 |   This does the shutdown on all nodes of the instance.
...  | ...  |
1094 | 1094 |
1095 | 1095 |   """
1096 | 1096 |   all_result = True
1097 |      | -   disks = _ExpandCheckDisks(instance, disks)
     | 1097 | +   disks = ExpandCheckDisks(instance, disks)
1098 | 1098 |
1099 | 1099 |   for disk in disks:
1100 | 1100 |     for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
...  | ...  |
1117 | 1117 |   _ShutdownInstanceDisks.
1118 | 1118 |
1119 | 1119 |   """
1120 |      | -   _CheckInstanceState(lu, instance, INSTANCE_DOWN, msg="cannot shutdown disks")
1121 |      | -   _ShutdownInstanceDisks(lu, instance, disks=disks)
     | 1120 | +   CheckInstanceState(lu, instance, INSTANCE_DOWN, msg="cannot shutdown disks")
     | 1121 | +   ShutdownInstanceDisks(lu, instance, disks=disks)
1122 | 1122 |
1123 | 1123 |
1124 |      | - def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
     | 1124 | + def AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
1125 | 1125 |                            ignore_size=False):
1126 | 1126 |   """Prepare the block devices for an instance.
1127 | 1127 |
...  | ...  |
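The hunk at lines 1117-1121 shows _SafeShutdownInstanceDisks, which keeps its underscore since it stays local to this module: it refuses to touch the disks unless the instance is down, then delegates to the newly renamed ShutdownInstanceDisks. Its body, as far as this diff shows it, with the signature inferred from the call at line 1645:

from ganeti.cmdlib.common import INSTANCE_DOWN, CheckInstanceState
# ShutdownInstanceDisks is defined earlier in this same module.

def _SafeShutdownInstanceDisks(lu, instance, disks=None):
  """Shutdown block devices of an instance.

  This function checks the instance state before calling
  _ShutdownInstanceDisks (docstring fragment visible at line 1117).

  """
  CheckInstanceState(lu, instance, INSTANCE_DOWN, msg="cannot shutdown disks")
  ShutdownInstanceDisks(lu, instance, disks=disks)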
1148 | 1148 |   device_info = []
1149 | 1149 |   disks_ok = True
1150 | 1150 |   iname = instance.name
1151 |      | -   disks = _ExpandCheckDisks(instance, disks)
     | 1151 | +   disks = ExpandCheckDisks(instance, disks)
1152 | 1152 |
1153 | 1153 |   # With the two passes mechanism we try to reduce the window of
1154 | 1154 |   # opportunity for the race condition of switching DRBD to primary
...  | ...  |
1213 | 1213 |   return disks_ok, device_info
1214 | 1214 |
1215 | 1215 |
1216 |      | - def _StartInstanceDisks(lu, instance, force):
     | 1216 | + def StartInstanceDisks(lu, instance, force):
1217 | 1217 |   """Start the disks of an instance.
1218 | 1218 |
1219 | 1219 |   """
1220 |      | -   disks_ok, _ = _AssembleInstanceDisks(lu, instance,
1221 |      | -                                        ignore_secondaries=force)
     | 1220 | +   disks_ok, _ = AssembleInstanceDisks(lu, instance,
     | 1221 | +                                       ignore_secondaries=force)
1222 | 1222 |   if not disks_ok:
1223 |      | -     _ShutdownInstanceDisks(lu, instance)
     | 1223 | +     ShutdownInstanceDisks(lu, instance)
1224 | 1224 |     if force is not None and not force:
1225 | 1225 |       lu.LogWarning("",
1226 | 1226 |                     hint=("If the message above refers to a secondary node,"
...  | ...  |
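StartInstanceDisks is almost completely visible in the last hunk: assemble the disks (optionally ignoring unreachable secondaries), and if that fails, shut everything back down and bail out. A sketch of the whole function under that reading; the hint text is truncated in the diff and the final raise is assumed rather than shown:

def StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  """
  disks_ok, _ = AssembleInstanceDisks(lu, instance,
                                      ignore_secondaries=force)
  if not disks_ok:
    ShutdownInstanceDisks(lu, instance)
    if force is not None and not force:
      # The hunk shows a warning whose hint starts with "If the message above
      # refers to a secondary node," -- the rest is outside the visible context.
      lu.LogWarning("", hint=("If the message above refers to a secondary"
                              " node, ..."))
    # Assumed: abort the operation once the disks could not be assembled.
    raise errors.OpExecError("Disk consistency error")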
1249 | 1249 |     elif level == locking.LEVEL_NODE_RES:
1250 | 1250 |       # Copy node locks
1251 | 1251 |       self.needed_locks[locking.LEVEL_NODE_RES] = \
1252 |      | -         _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
     | 1252 | +         CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1253 | 1253 |
1254 | 1254 |   def BuildHooksEnv(self):
1255 | 1255 |     """Build hooks env.
...  | ...  |
1262 | 1262 |       "AMOUNT": self.op.amount,
1263 | 1263 |       "ABSOLUTE": self.op.absolute,
1264 | 1264 |       }
1265 |      | -     env.update(_BuildInstanceHookEnvByObject(self, self.instance))
     | 1265 | +     env.update(BuildInstanceHookEnvByObject(self, self.instance))
1266 | 1266 |     return env
1267 | 1267 |
1268 | 1268 |   def BuildHooksNodes(self):
...  | ...  |
1283 | 1283 |       "Cannot retrieve locked instance %s" % self.op.instance_name
1284 | 1284 |     nodenames = list(instance.all_nodes)
1285 | 1285 |     for node in nodenames:
1286 |      | -       _CheckNodeOnline(self, node)
     | 1286 | +       CheckNodeOnline(self, node)
1287 | 1287 |
1288 | 1288 |     self.instance = instance
1289 | 1289 |
...  | ...  |
1318 | 1318 |     # TODO: check the free disk space for file, when that feature will be
1319 | 1319 |     # supported
1320 | 1320 |     nodes = map(self.cfg.GetNodeInfo, nodenames)
1321 |      | -     es_nodes = filter(lambda n: _IsExclusiveStorageEnabledNode(self.cfg, n),
     | 1321 | +     es_nodes = filter(lambda n: IsExclusiveStorageEnabledNode(self.cfg, n),
1322 | 1322 |                       nodes)
1323 | 1323 |     if es_nodes:
1324 | 1324 |       # With exclusive storage we need to something smarter than just looking
1325 | 1325 |       # at free space; for now, let's simply abort the operation.
1326 | 1326 |       raise errors.OpPrereqError("Cannot grow disks when exclusive_storage"
1327 | 1327 |                                  " is enabled", errors.ECODE_STATE)
1328 |      | -     _CheckNodesFreeDiskPerVG(self, nodenames, req_vgspace)
     | 1328 | +     CheckNodesFreeDiskPerVG(self, nodenames, req_vgspace)
1329 | 1329 |
1330 | 1330 |   def Exec(self, feedback_fn):
1331 | 1331 |     """Execute disk grow.
...  | ...  |
1340 | 1340 |
1341 | 1341 |     wipe_disks = self.cfg.GetClusterInfo().prealloc_wipe_disks
1342 | 1342 |
1343 |      | -     disks_ok, _ = _AssembleInstanceDisks(self, self.instance, disks=[disk])
     | 1343 | +     disks_ok, _ = AssembleInstanceDisks(self, self.instance, disks=[disk])
1344 | 1344 |     if not disks_ok:
1345 | 1345 |       raise errors.OpExecError("Cannot activate block device to grow")
1346 | 1346 |
...  | ...  |
1395 | 1395 |     self.cfg.Update(instance, feedback_fn)
1396 | 1396 |
1397 | 1397 |     # Changes have been recorded, release node lock
1398 |      | -     _ReleaseLocks(self, locking.LEVEL_NODE)
     | 1398 | +     ReleaseLocks(self, locking.LEVEL_NODE)
1399 | 1399 |
1400 | 1400 |     # Downgrade lock while waiting for sync
1401 | 1401 |     self.glm.downgrade(locking.LEVEL_INSTANCE)
...  | ...  |
1406 | 1406 |       assert instance.disks[self.op.disk] == disk
1407 | 1407 |
1408 | 1408 |       # Wipe newly added disk space
1409 |      | -       _WipeDisks(self, instance,
1410 |      | -                  disks=[(self.op.disk, disk, old_disk_size)])
     | 1409 | +       WipeDisks(self, instance,
     | 1410 | +                 disks=[(self.op.disk, disk, old_disk_size)])
1411 | 1411 |
1412 | 1412 |     if self.op.wait_for_sync:
1413 |      | -       disk_abort = not _WaitForSync(self, instance, disks=[disk])
     | 1413 | +       disk_abort = not WaitForSync(self, instance, disks=[disk])
1414 | 1414 |       if disk_abort:
1415 | 1415 |         self.LogWarning("Disk syncing has not returned a good status; check"
1416 | 1416 |                         " the instance")
...  | ...  |
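Taken together, the grow-disk hunks above outline the Exec flow of the grow LU: assemble the one disk being grown, grow it, record the new size in the configuration, release the node locks, optionally wipe the newly added space, and optionally wait for resync. A condensed, order-only sketch of that flow; method and attribute names follow the fragments shown, everything else (including the growing step itself) is schematic and assumes this module's own helpers plus ganeti.errors and ganeti.locking:

def exec_grow_disk(self, feedback_fn, disk, old_disk_size):
  # 1. The disk must be active before it can be grown (line 1343).
  disks_ok, _ = AssembleInstanceDisks(self, self.instance, disks=[disk])
  if not disks_ok:
    raise errors.OpExecError("Cannot activate block device to grow")

  # 2. ... grow the device on all nodes (not shown in this diff) ...

  # 3. Record the new size, then give up locks no longer needed (1395-1398).
  self.cfg.Update(self.instance, feedback_fn)
  ReleaseLocks(self, locking.LEVEL_NODE)

  # 4. Optionally wipe only the newly added space (1409-1410).
  if self.cfg.GetClusterInfo().prealloc_wipe_disks:
    WipeDisks(self, self.instance,
              disks=[(self.op.disk, disk, old_disk_size)])

  # 5. Optionally wait for the disk to resync (1412-1416).
  if self.op.wait_for_sync:
    if not WaitForSync(self, self.instance, disks=[disk]):
      self.LogWarning("Disk syncing has not returned a good status; check"
                      " the instance")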
1445 | 1445 |                                    " iallocator script must be used or the"
1446 | 1446 |                                    " new node given", errors.ECODE_INVAL)
1447 | 1447 |       else:
1448 |      | -         _CheckIAllocatorOrNode(self, "iallocator", "remote_node")
     | 1448 | +         CheckIAllocatorOrNode(self, "iallocator", "remote_node")
1449 | 1449 |
1450 | 1450 |     elif remote_node is not None or ialloc is not None:
1451 | 1451 |       # Not replacing the secondary
...  | ...  |
1464 | 1464 |         "Conflicting options"
1465 | 1465 |
1466 | 1466 |     if self.op.remote_node is not None:
1467 |      | -       self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
     | 1467 | +       self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
1468 | 1468 |
1469 | 1469 |     # Warning: do not remove the locking of the new secondary here
1470 | 1470 |     # unless DRBD8.AddChildren is changed to work in parallel;
...  | ...  |
1535 | 1535 |       "NEW_SECONDARY": self.op.remote_node,
1536 | 1536 |       "OLD_SECONDARY": instance.secondary_nodes[0],
1537 | 1537 |       }
1538 |      | -     env.update(_BuildInstanceHookEnvByObject(self, instance))
     | 1538 | +     env.update(BuildInstanceHookEnvByObject(self, instance))
1539 | 1539 |     return env
1540 | 1540 |
1541 | 1541 |   def BuildHooksNodes(self):
...  | ...  |
1561 | 1561 |     # Verify if node group locks are still correct
1562 | 1562 |     owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
1563 | 1563 |     if owned_groups:
1564 |      | -       _CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
     | 1564 | +       CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
1565 | 1565 |
1566 | 1566 |     return LogicalUnit.CheckPrereq(self)
1567 | 1567 |
...  | ...  |
1590 | 1590 |     self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1591 | 1591 |     assert self.instance is not None, \
1592 | 1592 |       "Cannot retrieve locked instance %s" % self.op.instance_name
1593 |      | -     _CheckNodeOnline(self, self.instance.primary_node)
     | 1593 | +     CheckNodeOnline(self, self.instance.primary_node)
1594 | 1594 |
1595 | 1595 |   def Exec(self, feedback_fn):
1596 | 1596 |     """Activate the disks.
1597 | 1597 |
1598 | 1598 |     """
1599 | 1599 |     disks_ok, disks_info = \
1600 |      | -       _AssembleInstanceDisks(self, self.instance,
1601 |      | -                              ignore_size=self.op.ignore_size)
     | 1600 | +       AssembleInstanceDisks(self, self.instance,
     | 1601 | +                             ignore_size=self.op.ignore_size)
1602 | 1602 |     if not disks_ok:
1603 | 1603 |       raise errors.OpExecError("Cannot activate block devices")
1604 | 1604 |
1605 | 1605 |     if self.op.wait_for_sync:
1606 |      | -       if not _WaitForSync(self, self.instance):
     | 1606 | +       if not WaitForSync(self, self.instance):
1607 | 1607 |         raise errors.OpExecError("Some disks of the instance are degraded!")
1608 | 1608 |
1609 | 1609 |     return disks_info
...  | ...  |
1640 | 1640 |     """
1641 | 1641 |     instance = self.instance
1642 | 1642 |     if self.op.force:
1643 |      | -       _ShutdownInstanceDisks(self, instance)
     | 1643 | +       ShutdownInstanceDisks(self, instance)
1644 | 1644 |     else:
1645 | 1645 |       _SafeShutdownInstanceDisks(self, instance)
1646 | 1646 |
...  | ...  |
1683 | 1683 |   return result
1684 | 1684 |
1685 | 1685 |
1686 |      | - def _CheckDiskConsistency(lu, instance, dev, node, on_primary, ldisk=False):
     | 1686 | + def CheckDiskConsistency(lu, instance, dev, node, on_primary, ldisk=False):
1687 | 1687 |   """Wrapper around L{_CheckDiskConsistencyInner}.
1688 | 1688 |
1689 | 1689 |   """
1690 |      | -   (disk,) = _AnnotateDiskParams(instance, [dev], lu.cfg)
     | 1690 | +   (disk,) = AnnotateDiskParams(instance, [dev], lu.cfg)
1691 | 1691 |   return _CheckDiskConsistencyInner(lu, instance, disk, node, on_primary,
1692 | 1692 |                                     ldisk=ldisk)
1693 | 1693 |
...  | ...  |
1702 | 1702 |   @returns The result of the rpc call
1703 | 1703 |
1704 | 1704 |   """
1705 |      | -   (disk,) = _AnnotateDiskParams(instance, [dev], lu.cfg)
     | 1705 | +   (disk,) = AnnotateDiskParams(instance, [dev], lu.cfg)
1706 | 1706 |   return lu.rpc.call_blockdev_find(node, disk)
1707 | 1707 |
1708 | 1708 |
...  | ...  |
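CheckDiskConsistency and the blockdev-find helper in the last two hunks share one shape: annotate the disk with its effective parameters first, then delegate to an inner worker or an RPC call. A minimal sketch of that wrapper pattern (the inner function here is a placeholder, not Ganeti's _CheckDiskConsistencyInner):

def annotate_then_call(annotate_fn, inner_fn):
  """Build a public wrapper that annotates its disk argument before delegating.

  annotate_fn(instance, disks, cfg) returns annotated disks, in the manner of
  AnnotateDiskParams; inner_fn(lu, instance, disk, *args, **kwargs) is the
  real worker.
  """
  def wrapper(lu, instance, dev, *args, **kwargs):
    (disk,) = annotate_fn(instance, [dev], lu.cfg)
    return inner_fn(lu, instance, disk, *args, **kwargs)
  return wrapper

# Conceptually:
#   CheckDiskConsistency == annotate_then_call(AnnotateDiskParams,
#                                              _CheckDiskConsistencyInner)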
1773 | 1773 |     return remote_node_name
1774 | 1774 |
1775 | 1775 |   def _FindFaultyDisks(self, node_name):
1776 |      | -     """Wrapper for L{_FindFaultyInstanceDisks}.
     | 1776 | +     """Wrapper for L{FindFaultyInstanceDisks}.
1777 | 1777 |
1778 | 1778 |     """
1779 |      | -     return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
1780 |      | -                                     node_name, True)
     | 1779 | +     return FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
     | 1780 | +                                    node_name, True)
1781 | 1781 |
1782 | 1782 |   def _CheckDisksActivated(self, instance):
1783 | 1783 |     """Checks if the instance disks are activated.
...  | ...  |
1901 | 1901 |       self.target_node = secondary_node
1902 | 1902 |       check_nodes = [self.new_node, self.other_node]
1903 | 1903 |
1904 |      | -       _CheckNodeNotDrained(self.lu, remote_node)
1905 |      | -       _CheckNodeVmCapable(self.lu, remote_node)
     | 1904 | +       CheckNodeNotDrained(self.lu, remote_node)
     | 1905 | +       CheckNodeVmCapable(self.lu, remote_node)
1906 | 1906 |
1907 | 1907 |       old_node_info = self.cfg.GetNodeInfo(secondary_node)
1908 | 1908 |       assert old_node_info is not None
...  | ...  |
1928 | 1928 |       cluster = self.cfg.GetClusterInfo()
1929 | 1929 |       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
1930 | 1930 |                                                               new_group_info)
1931 |      | -       _CheckTargetNodeIPolicy(self, ipolicy, instance, self.remote_node_info,
1932 |      | -                               self.cfg, ignore=self.ignore_ipolicy)
     | 1931 | +       CheckTargetNodeIPolicy(self, ipolicy, instance, self.remote_node_info,
     | 1932 | +                              self.cfg, ignore=self.ignore_ipolicy)
1933 | 1933 |
1934 | 1934 |     for node in check_nodes:
1935 |      | -       _CheckNodeOnline(self.lu, node)
     | 1935 | +       CheckNodeOnline(self.lu, node)
1936 | 1936 |
1937 | 1937 |     touched_nodes = frozenset(node_name for node_name in [self.new_node,
1938 | 1938 |                                                           self.other_node,
...  | ...  |
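When a new secondary is given for a DRBD replace-disks operation, the tasklet runs a fixed battery of prerequisite checks against it before any storage is touched: not drained, VM-capable, compatible with the target group's instance policy, and online. A compressed sketch of that check sequence as the hunks show it; the surrounding control flow is simplified and the function boundary is mine, not Ganeti's:

def check_new_secondary(self, remote_node, instance, new_group_info,
                        check_nodes):
  # Node must accept new instances and be able to run VMs (lines 1904-1905).
  CheckNodeNotDrained(self.lu, remote_node)
  CheckNodeVmCapable(self.lu, remote_node)

  # The instance must fit the target group's instance policy (1928-1932).
  cluster = self.cfg.GetClusterInfo()
  ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                          new_group_info)
  CheckTargetNodeIPolicy(self, ipolicy, instance, self.remote_node_info,
                         self.cfg, ignore=self.ignore_ipolicy)

  # Finally, every node involved in the operation must be online (1934-1935).
  for node in check_nodes:
    CheckNodeOnline(self.lu, node)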
1940 | 1940 |                               if node_name is not None)
1941 | 1941 |
1942 | 1942 |     # Release unneeded node and node resource locks
1943 |      | -     _ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
1944 |      | -     _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES, keep=touched_nodes)
1945 |      | -     _ReleaseLocks(self.lu, locking.LEVEL_NODE_ALLOC)
     | 1943 | +     ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
     | 1944 | +     ReleaseLocks(self.lu, locking.LEVEL_NODE_RES, keep=touched_nodes)
     | 1945 | +     ReleaseLocks(self.lu, locking.LEVEL_NODE_ALLOC)
1946 | 1946 |
1947 | 1947 |     # Release any owned node group
1948 |      | -     _ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)
     | 1948 | +     ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)
1949 | 1949 |
1950 | 1950 |     # Check whether disks are valid
1951 | 1951 |     for disk_idx in self.disks:
...  | ...  |
1993 | 1993 |
1994 | 1994 |     # Activate the instance disks if we're replacing them on a down instance
1995 | 1995 |     if activate_disks:
1996 |      | -       _StartInstanceDisks(self.lu, self.instance, True)
     | 1996 | +       StartInstanceDisks(self.lu, self.instance, True)
1997 | 1997 |
1998 | 1998 |     try:
1999 | 1999 |       # Should we replace the secondary node?
...  | ...  |
2066 | 2066 |       self.lu.LogInfo("Checking disk/%d consistency on node %s" %
2067 | 2067 |                       (idx, node_name))
2068 | 2068 |
2069 |      | -       if not _CheckDiskConsistency(self.lu, self.instance, dev, node_name,
2070 |      | -                                    on_primary, ldisk=ldisk):
     | 2069 | +       if not CheckDiskConsistency(self.lu, self.instance, dev, node_name,
     | 2070 | +                                   on_primary, ldisk=ldisk):
2071 | 2071 |         raise errors.OpExecError("Node %s has degraded storage, unsafe to"
2072 | 2072 |                                  " replace disks for instance %s" %
2073 | 2073 |                                  (node_name, self.instance.name))
...  | ...  |
2081 | 2081 |     """
2082 | 2082 |     iv_names = {}
2083 | 2083 |
2084 |      | -     disks = _AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
     | 2084 | +     disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
2085 | 2085 |     for idx, dev in enumerate(disks):
2086 | 2086 |       if idx not in self.disks:
2087 | 2087 |         continue
...  | ...  |
2107 | 2107 |       new_lvs = [lv_data, lv_meta]
2108 | 2108 |       old_lvs = [child.Copy() for child in dev.children]
2109 | 2109 |       iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
2110 |      | -       excl_stor = _IsExclusiveStorageEnabledNodeName(self.lu.cfg, node_name)
     | 2110 | +       excl_stor = IsExclusiveStorageEnabledNodeName(self.lu.cfg, node_name)
2111 | 2111 |
2112 | 2112 |       # we pass force_create=True to force the LVM creation
2113 | 2113 |       for new_lv in new_lvs:
2114 | 2114 |         _CreateBlockDevInner(self.lu, node_name, self.instance, new_lv, True,
2115 |      | -                              _GetInstanceInfoText(self.instance), False,
     | 2115 | +                              GetInstanceInfoText(self.instance), False,
2116 | 2116 |                              excl_stor)
2117 | 2117 |
2118 | 2118 |     return iv_names
...  | ...  |
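The _CreateNewStorage hunk shows the shape of the replace-disks allocation step: for every selected disk, remember its old LVs, create fresh data and metadata LVs with force_create=True, and key everything by the disk's iv_name for the later swap. A compressed sketch of that bookkeeping; the LV-object construction is not visible in the diff, so it is stood in for by a hypothetical helper, and only the loop structure mirrors the hunk:

def create_new_storage(self, node_name):
  """Sketch of the per-disk LV allocation loop shown in the hunk above."""
  iv_names = {}
  disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
  for idx, dev in enumerate(disks):
    if idx not in self.disks:
      continue
    # _BuildReplacementLvs is a hypothetical stand-in for the omitted code
    # that constructs the new data and metadata LV objects for this disk.
    lv_data, lv_meta = _BuildReplacementLvs(dev)
    new_lvs = [lv_data, lv_meta]
    old_lvs = [child.Copy() for child in dev.children]
    iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
    excl_stor = IsExclusiveStorageEnabledNodeName(self.lu.cfg, node_name)
    # we pass force_create=True to force the LVM creation
    for new_lv in new_lvs:
      _CreateBlockDevInner(self.lu, node_name, self.instance, new_lv, True,
                           GetInstanceInfoText(self.instance), False,
                           excl_stor)
  return iv_names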
2261 | 2261 |       self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
2262 | 2262 |       self._RemoveOldStorage(self.target_node, iv_names)
2263 | 2263 |       # TODO: Check if releasing locks early still makes sense
2264 |      | -       _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
     | 2264 | +       ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
2265 | 2265 |     else:
2266 | 2266 |       # Release all resource locks except those used by the instance
2267 |      | -       _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
2268 |      | -                     keep=self.node_secondary_ip.keys())
     | 2267 | +       ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
     | 2268 | +                    keep=self.node_secondary_ip.keys())
2269 | 2269 |
2270 | 2270 |     # Release all node locks while waiting for sync
2271 |      | -     _ReleaseLocks(self.lu, locking.LEVEL_NODE)
     | 2271 | +     ReleaseLocks(self.lu, locking.LEVEL_NODE)
2272 | 2272 |
2273 | 2273 |     # TODO: Can the instance lock be downgraded here? Take the optional disk
2274 | 2274 |     # shutdown in the caller into consideration.
...  | ...  |
2277 | 2277 |     # This can fail as the old devices are degraded and _WaitForSync
2278 | 2278 |     # does a combined result over all disks, so we don't check its return value
2279 | 2279 |     self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
2280 |      | -     _WaitForSync(self.lu, self.instance)
     | 2280 | +     WaitForSync(self.lu, self.instance)
2281 | 2281 |
2282 | 2282 |     # Check all devices manually
2283 | 2283 |     self._CheckDevices(self.instance.primary_node, iv_names)
...  | ...  |
2321 | 2321 |
2322 | 2322 |     # Step: create new storage
2323 | 2323 |     self.lu.LogStep(3, steps_total, "Allocate new storage")
2324 |      | -     disks = _AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
2325 |      | -     excl_stor = _IsExclusiveStorageEnabledNodeName(self.lu.cfg, self.new_node)
     | 2324 | +     disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
     | 2325 | +     excl_stor = IsExclusiveStorageEnabledNodeName(self.lu.cfg, self.new_node)
2326 | 2326 |     for idx, dev in enumerate(disks):
2327 | 2327 |       self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
2328 | 2328 |                       (self.new_node, idx))
2329 | 2329 |       # we pass force_create=True to force LVM creation
2330 | 2330 |       for new_lv in dev.children:
2331 | 2331 |         _CreateBlockDevInner(self.lu, self.new_node, self.instance, new_lv,
2332 |      | -                              True, _GetInstanceInfoText(self.instance), False,
     | 2332 | +                              True, GetInstanceInfoText(self.instance), False,
2333 | 2333 |                              excl_stor)
2334 | 2334 |
2335 | 2335 |     # Step 4: dbrd minors and drbd setups changes
...  | ...  |
2369 | 2369 |                             children=dev.children,
2370 | 2370 |                             size=dev.size,
2371 | 2371 |                             params={})
2372 |      | -       (anno_new_drbd,) = _AnnotateDiskParams(self.instance, [new_drbd],
2373 |      | -                                              self.cfg)
     | 2372 | +       (anno_new_drbd,) = AnnotateDiskParams(self.instance, [new_drbd],
     | 2373 | +                                             self.cfg)
2374 | 2374 |       try:
2375 |      | -         _CreateSingleBlockDev(self.lu, self.new_node, self.instance,
2376 |      | -                               anno_new_drbd,
2377 |      | -                               _GetInstanceInfoText(self.instance), False,
2378 |      | -                               excl_stor)
     | 2375 | +         CreateSingleBlockDev(self.lu, self.new_node, self.instance,
     | 2376 | +                              anno_new_drbd,
     | 2377 | +                              GetInstanceInfoText(self.instance), False,
     | 2378 | +                              excl_stor)
2379 | 2379 |       except errors.GenericError:
2380 | 2380 |         self.cfg.ReleaseDRBDMinors(self.instance.name)
2381 | 2381 |         raise
...  | ...  |
2413 | 2413 |     self.cfg.Update(self.instance, feedback_fn)
2414 | 2414 |
2415 | 2415 |     # Release all node locks (the configuration has been updated)
2416 |      | -     _ReleaseLocks(self.lu, locking.LEVEL_NODE)
     | 2416 | +     ReleaseLocks(self.lu, locking.LEVEL_NODE)
2417 | 2417 |
2418 | 2418 |     # and now perform the drbd attach
2419 | 2419 |     self.lu.LogInfo("Attaching primary drbds to new secondary"
...  | ...  |
2438 | 2438 |       self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
2439 | 2439 |       self._RemoveOldStorage(self.target_node, iv_names)
2440 | 2440 |       # TODO: Check if releasing locks early still makes sense
2441 |      | -       _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
     | 2441 | +       ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
2442 | 2442 |     else:
2443 | 2443 |       # Release all resource locks except those used by the instance
2444 |      | -       _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
2445 |      | -                     keep=self.node_secondary_ip.keys())
     | 2444 | +       ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
     | 2445 | +                    keep=self.node_secondary_ip.keys())
2446 | 2446 |
2447 | 2447 |     # TODO: Can the instance lock be downgraded here? Take the optional disk
2448 | 2448 |     # shutdown in the caller into consideration.
...  | ...  |
2451 | 2451 |     # This can fail as the old devices are degraded and _WaitForSync
2452 | 2452 |     # does a combined result over all disks, so we don't check its return value
2453 | 2453 |     self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
2454 |      | -     _WaitForSync(self.lu, self.instance)
     | 2454 | +     WaitForSync(self.lu, self.instance)
2455 | 2455 |
2456 | 2456 |     # Check all devices manually
2457 | 2457 |     self._CheckDevices(self.instance.primary_node, iv_names)