Revision 1d4a4b26
b/lib/cmdlib/backup.py

     for disk in instance.disks:
       self.cfg.SetDiskID(disk, src_node)

-    activate_disks = (instance.admin_state != constants.ADMINST_UP)
+    activate_disks = not instance.disks_active

     if activate_disks:
       # Activate the instance disks if we're exporting a stopped instance
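Note on the backup.py hunk: the export path now keys disk activation off the tracked flag rather than the admin state, activating disks only when they are not already active and undoing only that activation afterwards. A minimal sketch of that pattern, with the real export and disk helpers passed in as stand-ins (they are not reproduced here):

  def export_with_disk_handling(lu, instance, do_export, start_disks, stop_disks):
    """Hedged sketch of the export-time pattern; not the literal LU code."""
    activate_disks = not instance.disks_active   # only touch disks that are down
    if activate_disks:
      start_disks(lu, instance)
    try:
      do_export(instance)
    finally:
      if activate_disks:
        stop_disks(lu, instance)                 # deactivate only what we activated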
b/lib/cmdlib/cluster.py

       # node here
       snode = node_image[nname]
       bad_snode = snode.ghost or snode.offline
-      _ErrorIf(inst_config.admin_state == constants.ADMINST_UP and
+      _ErrorIf(inst_config.disks_active and
                not success and not bad_snode,
                constants.CV_EINSTANCEFAULTYDISK, instance,
                "couldn't retrieve status for disk/%s on %s: %s",
                idx, nname, bdev_status)
-      _ErrorIf((inst_config.admin_state == constants.ADMINST_UP and
+      _ErrorIf((inst_config.disks_active and
                success and bdev_status.ldisk_status == constants.LDS_FAULTY),
                constants.CV_EINSTANCEFAULTYDISK, instance,
                "disk/%s on %s is faulty", idx, nname)
...
         node_drbd[minor] = (instance, False)
       else:
         instance = instanceinfo[instance]
-        node_drbd[minor] = (instance.name,
-                            instance.admin_state == constants.ADMINST_UP)
+        node_drbd[minor] = (instance.name, instance.disks_active)

       # and now check them
       used_minors = nresult.get(constants.NV_DRBDLIST, [])
b/lib/cmdlib/group.py

     res_missing = {}

     nv_dict = MapInstanceDisksToNodes(
-      [inst for inst in self.instances.values()
-       if inst.admin_state == constants.ADMINST_UP])
+      [inst for inst in self.instances.values() if inst.disks_active])

     if nv_dict:
       nodes = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
b/lib/cmdlib/instance.py

                             primary_node=pnode_name,
                             nics=self.nics, disks=disks,
                             disk_template=self.op.disk_template,
+                            disks_active=False,
                             admin_state=constants.ADMINST_DOWN,
                             network_port=network_port,
                             beparams=self.op.beparams,
...
           raise errors.OpExecError("There are some degraded disks for"
                                    " this instance")

+    # instance disks are now active
+    iobj.disks_active = True
+
     # Release all node resource locks
     ReleaseLocks(self, locking.LEVEL_NODE_RES)
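Note on the instance.py hunks: a new instance object is built with disks_active=False and the flag is flipped to True only after the disks have been created and have finished syncing. A rough sketch of that ordering (illustrative only; create_disks and wait_for_sync stand in for the real helpers):

  def sketch_create_instance(iobj, create_disks, wait_for_sync):
    """Illustrative lifecycle of disks_active during creation, not the LU code."""
    assert not iobj.disks_active        # new instances start with inactive disks
    create_disks(iobj)                  # stand-in for the real disk creation step
    if not wait_for_sync(iobj):
      raise RuntimeError("There are some degraded disks for this instance")
    iobj.disks_active = True            # flipped only after a successful sync
    return iobj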
b/lib/cmdlib/instance_migration.py

     source_node = instance.primary_node
     target_node = self.target_node

-    if instance.admin_state == constants.ADMINST_UP:
+    if instance.disks_active:
       self.feedback_fn("* checking disk consistency between source and target")
       for (idx, dev) in enumerate(instance.disks):
         # for drbd, these are drbd over lvm
b/lib/cmdlib/instance_storage.py

   ignored.

   """
+  lu.cfg.MarkInstanceDisksInactive(instance.name)
   all_result = True
   disks = ExpandCheckDisks(instance, disks)
...
   # into any other network-connected state (Connected, SyncTarget,
   # SyncSource, etc.)

+  # mark instance disks as active before doing actual work, so watcher does
+  # not try to shut them down erroneously
+  lu.cfg.MarkInstanceDisksActive(iname)
+
   # 1st pass, assemble on all nodes in secondary mode
   for idx, inst_disk in enumerate(disks):
     for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
...
   for disk in disks:
     lu.cfg.SetDiskID(disk, instance.primary_node)

+  if not disks_ok:
+    lu.cfg.MarkInstanceDisksInactive(iname)
+
   return disks_ok, device_info
...
     if disk_abort:
       self.LogWarning("Disk syncing has not returned a good status; check"
                       " the instance")
-      if instance.admin_state != constants.ADMINST_UP:
+      if not instance.disks_active:
         _SafeShutdownInstanceDisks(self, instance, disks=[disk])
-    elif instance.admin_state != constants.ADMINST_UP:
+    elif not instance.disks_active:
       self.LogWarning("Not shutting down the disk even if the instance is"
                       " not supposed to be running because no wait for"
                       " sync mode was requested")
...
     if self.op.wait_for_sync:
       if not WaitForSync(self, self.instance):
+        self.cfg.MarkInstanceDisksInactive(self.instance.name)
         raise errors.OpExecError("Some disks of the instance are degraded!")

     return disks_info
...
     feedback_fn("Current seconary node: %s" %
                 utils.CommaJoin(self.instance.secondary_nodes))

-    activate_disks = (self.instance.admin_state != constants.ADMINST_UP)
+    activate_disks = not self.instance.disks_active

     # Activate the instance disks if we're replacing them on a down instance
     if activate_disks:
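Note on the instance_storage.py hunks: the flag is updated before the actual work on both the assemble and the shutdown paths, so (per the new comment) a concurrently running watcher does not act on a half-finished operation; a failed assembly rolls the flag back. A compressed sketch of that ordering, with the per-node work abstracted into stand-in callables:

  def sketch_assemble_disks(lu, instance, assemble_everywhere):
    """Mirrors the ordering in the assemble hunk above; helpers are stand-ins."""
    # flag first, so the watcher does not shut the disks down again mid-assembly
    lu.cfg.MarkInstanceDisksActive(instance.name)
    disks_ok = assemble_everywhere(instance)
    if not disks_ok:
      lu.cfg.MarkInstanceDisksInactive(instance.name)  # roll back on failure
    return disks_ok

  def sketch_shutdown_disks(lu, instance, shutdown_everywhere):
    """Mirrors the ordering in the shutdown hunk above; helpers are stand-ins."""
    # flag first here as well, before the devices are actually torn down
    lu.cfg.MarkInstanceDisksInactive(instance.name)
    return shutdown_everywhere(instance)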
b/lib/cmdlib/node.py

     """
     # Check whether any instance on this node has faulty disks
     for inst in _GetNodeInstances(self.cfg, self.op.node_name):
-      if inst.admin_state != constants.ADMINST_UP:
+      if not inst.disks_active:
         continue
       check_nodes = set(inst.all_nodes)
       check_nodes.discard(self.op.node_name)
b/lib/config.py

       raise errors.ConfigurationError("Cannot add '%s': UUID %s already"
                                       " in use" % (item.name, item.uuid))

-  def _SetInstanceStatus(self, instance_name, status):
+  def _SetInstanceStatus(self, instance_name, status, disks_active):
     """Set the instance's status to a given value.

     """
-    assert status in constants.ADMINST_ALL, \
-           "Invalid status '%s' passed to SetInstanceStatus" % (status,)
-
     if instance_name not in self._config_data.instances:
       raise errors.ConfigurationError("Unknown instance '%s'" %
                                       instance_name)
     instance = self._config_data.instances[instance_name]
-    if instance.admin_state != status:
+
+    if status is None:
+      status = instance.admin_state
+    if disks_active is None:
+      disks_active = instance.disks_active
+
+    assert status in constants.ADMINST_ALL, \
+           "Invalid status '%s' passed to SetInstanceStatus" % (status,)
+
+    if instance.admin_state != status or \
+       instance.disks_active != disks_active:
       instance.admin_state = status
+      instance.disks_active = disks_active
       instance.serial_no += 1
       instance.mtime = time.time()
       self._WriteConfig()
...
   def MarkInstanceUp(self, instance_name):
     """Mark the instance status to up in the config.

+    This also sets the instance disks active flag.
+
     """
-    self._SetInstanceStatus(instance_name, constants.ADMINST_UP)
+    self._SetInstanceStatus(instance_name, constants.ADMINST_UP, True)

   @locking.ssynchronized(_config_lock)
   def MarkInstanceOffline(self, instance_name):
     """Mark the instance status to down in the config.

+    This also clears the instance disks active flag.
+
     """
-    self._SetInstanceStatus(instance_name, constants.ADMINST_OFFLINE)
+    self._SetInstanceStatus(instance_name, constants.ADMINST_OFFLINE, False)

   @locking.ssynchronized(_config_lock)
   def RemoveInstance(self, instance_name):
...
   def MarkInstanceDown(self, instance_name):
     """Mark the status of an instance to down in the configuration.

+    This does not touch the instance disks active flag, as shut down instances
+    can still have active disks.
+
+    """
+    self._SetInstanceStatus(instance_name, constants.ADMINST_DOWN, None)
+
+  @locking.ssynchronized(_config_lock)
+  def MarkInstanceDisksActive(self, instance_name):
+    """Mark the status of instance disks active.
+
+    """
+    self._SetInstanceStatus(instance_name, None, True)
+
+  @locking.ssynchronized(_config_lock)
+  def MarkInstanceDisksInactive(self, instance_name):
+    """Mark the status of instance disks inactive.
+
     """
-    self._SetInstanceStatus(instance_name, constants.ADMINST_DOWN)
+    self._SetInstanceStatus(instance_name, None, False)

   def _UnlockedGetInstanceList(self):
     """Get the list of instances.
b/lib/masterd/iallocator.py

                      constants.IDISK_MODE: dsk.mode}
                     for dsk in iinfo.disks],
           "disk_template": iinfo.disk_template,
+          "disks_active": iinfo.disks_active,
           "hypervisor": iinfo.hypervisor,
           }
         pir["disk_space_total"] = gmi.ComputeDiskSize(iinfo.disk_template,
b/lib/objects.py

     "nics",
     "disks",
     "disk_template",
+    "disks_active",
     "network_port",
     "serial_no",
     ] + _TIMESTAMPS + _UUID
b/src/Ganeti/Objects.hs

   , simpleField "nics" [t| [PartialNic] |]
   , simpleField "disks" [t| [Disk] |]
   , simpleField "disk_template" [t| DiskTemplate |]
+  , simpleField "disks_active" [t| Bool |]
   , optionalField $ simpleField "network_port" [t| Int |]
   ]
   ++ timeStampFields
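Note on the objects.py / Objects.hs hunks: the field is declared on both the Python and the Haskell side so that it round-trips through the serialized configuration. A quick, hedged check of the Python side (assumes a Ganeti source tree with this revision on PYTHONPATH; attribute and constant names are taken from the hunks above):

  from ganeti import constants, objects

  inst = objects.Instance(name="inst1.example.com",
                          admin_state=constants.ADMINST_DOWN,
                          disks_active=False)
  print(inst.disks_active)   # expected: False
  inst.disks_active = True   # would raise AttributeError if the slot were missing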
b/test/hs/Test/Ganeti/Objects.hs

       <*> vectorOf 5 genDisk
       -- disk template
       <*> arbitrary
+      -- disks active
+      <*> arbitrary
       -- network port
       <*> arbitrary
       -- ts