Revision 23829f6f — diff of b/lib/cmdlib.py
(side-by-side view; columns are: old line number | new line number | line content)
1580 | 1580 |
result = True |
1581 | 1581 |
if on_primary or dev.AssembleOnSecondary(): |
1582 | 1582 |
rstats = lu.rpc.call_blockdev_find(node, dev) |
1583 |
if rstats.failed or not rstats.data: |
|
1584 |
logging.warning("Node %s: disk degraded, not found or node down", node) |
|
1583 |
msg = rstats.RemoteFailMsg() |
|
1584 |
if msg: |
|
1585 |
lu.LogWarning("Can't find disk on node %s: %s", node, msg) |
|
1586 |
result = False |
|
1587 |
elif not rstats.payload: |
|
1588 |
lu.LogWarning("Can't find disk on node %s", node) |
|
1585 | 1589 |
result = False |
1586 | 1590 |
else: |
1587 |
result = result and (not rstats.data[idx])
|
|
1591 |
result = result and (not rstats.payload[idx])
|
|
1588 | 1592 |
if dev.children: |
1589 | 1593 |
for child in dev.children: |
1590 | 1594 |
result = result and _CheckDiskConsistency(lu, child, node, on_primary) |
... | ... | |
4929 | 4933 |
for node in tgt_node, oth_node: |
4930 | 4934 |
info("checking disk/%d on %s" % (idx, node)) |
4931 | 4935 |
cfg.SetDiskID(dev, node) |
4932 |
if not self.rpc.call_blockdev_find(node, dev): |
|
4933 |
raise errors.OpExecError("Can't find disk/%d on node %s" % |
|
4934 |
(idx, node)) |
|
4936 |
result = self.rpc.call_blockdev_find(node, dev) |
|
4937 |
msg = result.RemoteFailMsg() |
|
4938 |
if not msg and not result.payload: |
|
4939 |
msg = "disk not found" |
|
4940 |
if msg: |
|
4941 |
raise errors.OpExecError("Can't find disk/%d on node %s: %s" % |
|
4942 |
(idx, node, msg)) |
|
4935 | 4943 |
|
4936 | 4944 |
# Step: check other node consistency |
4937 | 4945 |
self.proc.LogStep(2, steps_total, "check peer consistency") |
... | ... | |
4994 | 5002 |
# build the rename list based on what LVs exist on the node |
4995 | 5003 |
rlist = [] |
4996 | 5004 |
for to_ren in old_lvs: |
4997 |
find_res = self.rpc.call_blockdev_find(tgt_node, to_ren) |
|
4998 |
if not find_res.failed and find_res.data is not None: # device exists |
|
5005 |
result = self.rpc.call_blockdev_find(tgt_node, to_ren) |
|
5006 |
if not result.RemoteFailMsg() and result.payload: |
|
5007 |
# device exists |
|
4999 | 5008 |
rlist.append((to_ren, ren_fn(to_ren, temp_suffix))) |
5000 | 5009 |
|
5001 | 5010 |
info("renaming the old LVs on the target node") |
... | ... | |
5045 | 5054 |
for name, (dev, old_lvs, new_lvs) in iv_names.iteritems(): |
5046 | 5055 |
cfg.SetDiskID(dev, instance.primary_node) |
5047 | 5056 |
result = self.rpc.call_blockdev_find(instance.primary_node, dev) |
5048 |
if result.failed or result.data[5]: |
|
5057 |
msg = result.RemoteFailMsg() |
|
5058 |
if not msg and not result.payload: |
|
5059 |
msg = "disk not found" |
|
5060 |
if msg: |
|
5061 |
raise errors.OpExecError("Can't find DRBD device %s: %s" % |
|
5062 |
(name, msg)) |
|
5063 |
if result.payload[5]: |
|
5049 | 5064 |
raise errors.OpExecError("DRBD device %s is degraded!" % name) |
5050 | 5065 |
|
5051 | 5066 |
# Step: remove old storage |
... | ... | |
5109 | 5124 |
info("checking disk/%d on %s" % (idx, pri_node)) |
5110 | 5125 |
cfg.SetDiskID(dev, pri_node) |
5111 | 5126 |
result = self.rpc.call_blockdev_find(pri_node, dev) |
5112 |
result.Raise() |
|
5113 |
if not result.data: |
|
5114 |
raise errors.OpExecError("Can't find disk/%d on node %s" % |
|
5115 |
(idx, pri_node)) |
|
5127 |
msg = result.RemoteFailMsg() |
|
5128 |
if not msg and not result.payload: |
|
5129 |
msg = "disk not found" |
|
5130 |
if msg: |
|
5131 |
raise errors.OpExecError("Can't find disk/%d on node %s: %s" % |
|
5132 |
(idx, pri_node, msg)) |
|
5116 | 5133 |
|
5117 | 5134 |
# Step: check other node consistency |
5118 | 5135 |
self.proc.LogStep(2, steps_total, "check peer consistency") |
... | ... | |
5221 | 5238 |
for idx, (dev, old_lvs, _) in iv_names.iteritems(): |
5222 | 5239 |
cfg.SetDiskID(dev, pri_node) |
5223 | 5240 |
result = self.rpc.call_blockdev_find(pri_node, dev) |
5224 |
result.Raise() |
|
5225 |
if result.data[5]: |
|
5241 |
msg = result.RemoteFailMsg() |
|
5242 |
if not msg and not result.payload: |
|
5243 |
msg = "disk not found" |
|
5244 |
if msg: |
|
5245 |
raise errors.OpExecError("Can't find DRBD device disk/%d: %s" % |
|
5246 |
(idx, msg)) |
|
5247 |
if result.payload[5]: |
|
5226 | 5248 |
raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx) |
5227 | 5249 |
|
5228 | 5250 |
self.proc.LogStep(6, steps_total, "removing old storage") |
... | ... | |
5410 | 5432 |
if not static: |
5411 | 5433 |
self.cfg.SetDiskID(dev, instance.primary_node) |
5412 | 5434 |
dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev) |
5413 |
dev_pstatus.Raise() |
|
5414 |
dev_pstatus = dev_pstatus.data |
|
5435 |
msg = dev_pstatus.RemoteFailMsg() |
|
5436 |
if msg: |
|
5437 |
raise errors.OpExecError("Can't compute disk status for %s: %s" % |
|
5438 |
(instance.name, msg)) |
|
5439 |
dev_pstatus = dev_pstatus.payload |
|
5415 | 5440 |
else: |
5416 | 5441 |
dev_pstatus = None |
5417 | 5442 |
|
... | ... | |
5425 | 5450 |
if snode and not static: |
5426 | 5451 |
self.cfg.SetDiskID(dev, snode) |
5427 | 5452 |
dev_sstatus = self.rpc.call_blockdev_find(snode, dev) |
5428 |
dev_sstatus.Raise() |
|
5429 |
dev_sstatus = dev_sstatus.data |
|
5453 |
msg = dev_sstatus.RemoteFailMsg() |
|
5454 |
if msg: |
|
5455 |
raise errors.OpExecError("Can't compute disk status for %s: %s" % |
|
5456 |
(instance.name, msg)) |
|
5457 |
dev_sstatus = dev_sstatus.payload |
|
5430 | 5458 |
else: |
5431 | 5459 |
dev_sstatus = None |
5432 | 5460 |
|
Also available in: Unified diff