Revision 7e608e89
b/hw/virtio-blk.c

old | new |
----|-----|------------------------------------------------------------------
239 | 239 |   }
240 | 240 |
241 | 241 |   static void virtio_blk_handle_flush(BlockRequest *blkreq, int *num_writes,
242 |     | -                                     VirtIOBlockReq *req, BlockDriverState **old_bs)
    | 242 | +                                     VirtIOBlockReq *req)
243 | 243 |   {
244 | 244 |       BlockDriverAIOCB *acb;
245 | 245 |
246 | 246 |       /*
247 | 247 |        * Make sure all outstanding writes are posted to the backing device.
248 | 248 |        */
249 |     | -     if (*old_bs != NULL) {
250 |     | -         do_multiwrite(*old_bs, blkreq, *num_writes);
    | 249 | +     if (*num_writes > 0) {
    | 250 | +         do_multiwrite(req->dev->bs, blkreq, *num_writes);
251 | 251 |       }
252 | 252 |       *num_writes = 0;
253 |     | -     *old_bs = req->dev->bs;
254 | 253 |
255 | 254 |       acb = bdrv_aio_flush(req->dev->bs, virtio_blk_flush_complete, req);
256 | 255 |       if (!acb) {
... | ... |
259 | 258 |   }
260 | 259 |
261 | 260 |   static void virtio_blk_handle_write(BlockRequest *blkreq, int *num_writes,
262 |     | -                                     VirtIOBlockReq *req, BlockDriverState **old_bs)
    | 261 | +                                     VirtIOBlockReq *req)
263 | 262 |   {
264 | 263 |       if (req->out->sector & req->dev->sector_mask) {
265 | 264 |           virtio_blk_rw_complete(req, -EIO);
266 | 265 |           return;
267 | 266 |       }
268 | 267 |
269 |     | -     if (req->dev->bs != *old_bs || *num_writes == 32) {
270 |     | -         if (*old_bs != NULL) {
271 |     | -             do_multiwrite(*old_bs, blkreq, *num_writes);
272 |     | -         }
    | 268 | +     if (*num_writes == 32) {
    | 269 | +         do_multiwrite(req->dev->bs, blkreq, *num_writes);
273 | 270 |           *num_writes = 0;
274 |     | -         *old_bs = req->dev->bs;
275 | 271 |       }
276 | 272 |
277 | 273 |       blkreq[*num_writes].sector = req->out->sector;
... | ... |
304 | 300 |   typedef struct MultiReqBuffer {
305 | 301 |       BlockRequest blkreq[32];
306 | 302 |       int num_writes;
307 |     | -     BlockDriverState *old_bs;
308 | 303 |   } MultiReqBuffer;
309 | 304 |
310 | 305 |   static void virtio_blk_handle_request(VirtIOBlockReq *req,
... | ... |
325 | 320 |       req->in = (void *)req->elem.in_sg[req->elem.in_num - 1].iov_base;
326 | 321 |
327 | 322 |       if (req->out->type & VIRTIO_BLK_T_FLUSH) {
328 |     | -         virtio_blk_handle_flush(mrb->blkreq, &mrb->num_writes,
329 |     | -                                 req, &mrb->old_bs);
    | 323 | +         virtio_blk_handle_flush(mrb->blkreq, &mrb->num_writes, req);
330 | 324 |       } else if (req->out->type & VIRTIO_BLK_T_SCSI_CMD) {
331 | 325 |           virtio_blk_handle_scsi(req);
332 | 326 |       } else if (req->out->type & VIRTIO_BLK_T_OUT) {
333 | 327 |           qemu_iovec_init_external(&req->qiov, &req->elem.out_sg[1],
334 | 328 |                                    req->elem.out_num - 1);
335 |     | -         virtio_blk_handle_write(mrb->blkreq, &mrb->num_writes,
336 |     | -                                 req, &mrb->old_bs);
    | 329 | +         virtio_blk_handle_write(mrb->blkreq, &mrb->num_writes, req);
337 | 330 |       } else {
338 | 331 |           qemu_iovec_init_external(&req->qiov, &req->elem.in_sg[0],
339 | 332 |                                    req->elem.in_num - 1);
... | ... |
347 | 340 |       VirtIOBlockReq *req;
348 | 341 |       MultiReqBuffer mrb = {
349 | 342 |           .num_writes = 0,
350 |     | -         .old_bs = NULL,
351 | 343 |       };
352 | 344 |
353 | 345 |       while ((req = virtio_blk_get_request(s))) {
... | ... |
355 | 347 |       }
356 | 348 |
357 | 349 |       if (mrb.num_writes > 0) {
358 |     | -         do_multiwrite(mrb.old_bs, mrb.blkreq, mrb.num_writes);
    | 350 | +         do_multiwrite(s->bs, mrb.blkreq, mrb.num_writes);
359 | 351 |       }
360 | 352 |
361 | 353 |       /*
... | ... |
371 | 363 |       VirtIOBlockReq *req = s->rq;
372 | 364 |       MultiReqBuffer mrb = {
373 | 365 |           .num_writes = 0,
374 |     | -         .old_bs = NULL,
375 | 366 |       };
376 | 367 |
377 | 368 |       qemu_bh_delete(s->bh);
... | ... |
385 | 376 |       }
386 | 377 |
387 | 378 |       if (mrb.num_writes > 0) {
388 |     | -         do_multiwrite(mrb.old_bs, mrb.blkreq, mrb.num_writes);
    | 379 | +         do_multiwrite(s->bs, mrb.blkreq, mrb.num_writes);
389 | 380 |       }
390 | 381 |   }
391 | 382 |
Also available in: Unified diff