Revision 987ec378
--- a/lib/cmdlib/instance.py
+++ b/lib/cmdlib/instance.py
@@ -1321,6 +1321,55 @@
 
     self.dry_run_result = list(node_uuids)
 
+  def _RemoveDegradedDisks(self, feedback_fn, disk_abort, instance):
+    """Removes degraded disks and instance.
+
+    It optionally checks whether disks are degraded. If the disks are
+    degraded, they are removed and the instance is also removed from
+    the configuration.
+
+    If L{disk_abort} is True, then the disks are considered degraded
+    and removed, and the instance is removed from the configuration.
+
+    If L{disk_abort} is False, it first checks whether the disks are
+    degraded and, if so, removes the disks and removes the instance
+    from the configuration.
+
+    @type feedback_fn: callable
+    @param feedback_fn: function used to send feedback back to the caller
+
+    @type disk_abort: boolean
+    @param disk_abort:
+      True if disks are degraded, False to first check if disks are
+      degraded
+
+    @type instance: L{objects.Instance}
+    @param instance: instance containing the disks to check
+
+    @rtype: NoneType
+    @return: None
+    @raise errors.OpExecError: if disks are degraded
+
+    """
+    if disk_abort:
+      pass
+    elif self.op.wait_for_sync:
+      disk_abort = not WaitForSync(self, instance)
+    elif instance.disk_template in constants.DTS_INT_MIRROR:
+      # make sure the disks are not degraded (still sync-ing is ok)
+      feedback_fn("* checking mirrors status")
+      disk_abort = not WaitForSync(self, instance, oneshot=True)
+    else:
+      disk_abort = False
+
+    if disk_abort:
+      RemoveDisks(self, instance)
+      self.cfg.RemoveInstance(instance.uuid)
+      # Make sure the instance lock gets removed
+      self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
+      raise errors.OpExecError("There are some degraded disks for"
+                               " this instance")
+
   def Exec(self, feedback_fn):
     """Create and add the instance to the cluster.
 
@@ -1409,6 +1458,7 @@
     # Release all nodes
     ReleaseLocks(self, locking.LEVEL_NODE)
 
+    # Wipe disks
     disk_abort = False
     if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
       feedback_fn("* wiping instance disks...")
@@ -1419,25 +1469,8 @@
       self.LogWarning("Wiping instance disks failed (%s)", err)
       disk_abort = True
 
-    if disk_abort:
-      # Something is already wrong with the disks, don't do anything else
-      pass
-    elif self.op.wait_for_sync:
-      disk_abort = not WaitForSync(self, iobj)
-    elif iobj.disk_template in constants.DTS_INT_MIRROR:
-      # make sure the disks are not degraded (still sync-ing is ok)
-      feedback_fn("* checking mirrors status")
-      disk_abort = not WaitForSync(self, iobj, oneshot=True)
-    else:
-      disk_abort = False
+    self._RemoveDegradedDisks(feedback_fn, disk_abort, iobj)
 
-    if disk_abort:
-      RemoveDisks(self, iobj)
-      self.cfg.RemoveInstance(iobj.uuid)
-      # Make sure the instance lock gets removed
-      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
-      raise errors.OpExecError("There are some degraded disks for"
-                               " this instance")
 
     # instance disks are now active
     iobj.disks_active = True
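
In short, this revision extracts the degraded-disk handling that was inlined in Exec into the reusable _RemoveDegradedDisks helper, so Exec only passes along the disk_abort flag it computed while wiping disks. The following is a minimal, self-contained sketch of that control flow; remove_degraded_disks, check_sync and DegradedDiskError are illustrative stand-ins invented here, not Ganeti's WaitForSync, RemoveDisks or errors.OpExecError.

# Sketch of the decision ladder _RemoveDegradedDisks centralizes:
# trust a pre-computed disk_abort flag, otherwise probe the disks once,
# then clean up and raise if they turn out to be degraded.

class DegradedDiskError(Exception):
  """Stand-in for errors.OpExecError."""

def remove_degraded_disks(feedback_fn, disk_abort, wait_for_sync,
                          is_mirrored, check_sync):
  if disk_abort:
    pass                                         # caller already knows disks are bad
  elif wait_for_sync:
    disk_abort = not check_sync(oneshot=False)   # wait for a full sync
  elif is_mirrored:
    feedback_fn("* checking mirrors status")
    disk_abort = not check_sync(oneshot=True)    # one probe; still syncing is ok

  if disk_abort:
    # The real helper also removes the disks, drops the instance from the
    # configuration and releases its lock before raising.
    raise DegradedDiskError("There are some degraded disks for this instance")

# Example: a mirrored instance whose one-shot check reports degraded disks.
try:
  remove_degraded_disks(print, disk_abort=False, wait_for_sync=False,
                        is_mirrored=True, check_sync=lambda oneshot: False)
except DegradedDiskError as err:
  print("aborting create:", err)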