Revision 6f5f060b
b/block/qcow2.c — side-by-side diff (columns: old line | new line | content)
---|---|---|
331 | 331 |
BlockDriverAIOCB common; |
332 | 332 |
int64_t sector_num; |
333 | 333 |
QEMUIOVector *qiov; |
334 |
uint8_t *buf; |
|
335 |
void *orig_buf; |
|
336 | 334 |
int remaining_sectors; |
337 | 335 |
int cur_nr_sectors; /* number of sectors in current iteration */ |
338 | 336 |
uint64_t bytes_done; |
339 | 337 |
uint64_t cluster_offset; |
340 | 338 |
uint8_t *cluster_data; |
341 | 339 |
BlockDriverAIOCB *hd_aiocb; |
342 |
struct iovec hd_iov; |
|
343 | 340 |
QEMUIOVector hd_qiov; |
344 | 341 |
QEMUBH *bh; |
345 | 342 |
QCowL2Meta l2meta; |
... | ... | |
530 | 527 |
acb->sector_num = sector_num; |
531 | 528 |
acb->qiov = qiov; |
532 | 529 |
|
533 |
if (!is_write) { |
|
534 |
qemu_iovec_init(&acb->hd_qiov, qiov->niov); |
|
535 |
} else if (qiov->niov == 1) { |
|
536 |
acb->buf = (uint8_t *)qiov->iov->iov_base; |
|
537 |
} else { |
|
538 |
acb->buf = acb->orig_buf = qemu_blockalign(bs, qiov->size); |
|
539 |
qemu_iovec_to_buffer(qiov, acb->buf); |
|
540 |
} |
|
530 |
qemu_iovec_init(&acb->hd_qiov, qiov->niov); |
|
541 | 531 |
|
542 | 532 |
acb->bytes_done = 0; |
543 | 533 |
acb->remaining_sectors = nb_sectors; |
... | ... | |
589 | 579 |
BlockDriverState *bs = acb->common.bs; |
590 | 580 |
BDRVQcowState *s = bs->opaque; |
591 | 581 |
int index_in_cluster; |
592 |
const uint8_t *src_buf; |
|
593 | 582 |
int n_end; |
594 | 583 |
|
595 | 584 |
acb->hd_aiocb = NULL; |
... | ... | |
605 | 594 |
|
606 | 595 |
acb->remaining_sectors -= acb->cur_nr_sectors; |
607 | 596 |
acb->sector_num += acb->cur_nr_sectors; |
608 |
acb->buf += acb->cur_nr_sectors * 512;
|
|
597 |
acb->bytes_done += acb->cur_nr_sectors * 512;
|
|
609 | 598 |
|
610 | 599 |
if (acb->remaining_sectors == 0) { |
611 | 600 |
/* request completed */ |
... | ... | |
636 | 625 |
|
637 | 626 |
assert((acb->cluster_offset & 511) == 0); |
638 | 627 |
|
628 |
qemu_iovec_reset(&acb->hd_qiov); |
|
629 |
qemu_iovec_copy(&acb->hd_qiov, acb->qiov, acb->bytes_done, |
|
630 |
acb->cur_nr_sectors * 512); |
|
631 |
|
|
639 | 632 |
if (s->crypt_method) { |
640 | 633 |
if (!acb->cluster_data) { |
641 | 634 |
acb->cluster_data = qemu_mallocz(QCOW_MAX_CRYPT_CLUSTERS * |
642 | 635 |
s->cluster_size); |
643 | 636 |
} |
644 |
qcow2_encrypt_sectors(s, acb->sector_num, acb->cluster_data, acb->buf, |
|
645 |
acb->cur_nr_sectors, 1, &s->aes_encrypt_key); |
|
646 |
src_buf = acb->cluster_data; |
|
647 |
} else { |
|
648 |
src_buf = acb->buf; |
|
637 |
|
|
638 |
assert(acb->hd_qiov.size <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); |
|
639 |
qemu_iovec_to_buffer(&acb->hd_qiov, acb->cluster_data); |
|
640 |
|
|
641 |
qcow2_encrypt_sectors(s, acb->sector_num, acb->cluster_data, |
|
642 |
acb->cluster_data, acb->cur_nr_sectors, 1, &s->aes_encrypt_key); |
|
643 |
|
|
644 |
qemu_iovec_reset(&acb->hd_qiov); |
|
645 |
qemu_iovec_add(&acb->hd_qiov, acb->cluster_data, |
|
646 |
acb->cur_nr_sectors * 512); |
|
649 | 647 |
} |
650 |
acb->hd_iov.iov_base = (void *)src_buf; |
|
651 |
acb->hd_iov.iov_len = acb->cur_nr_sectors * 512; |
|
652 |
qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1); |
|
648 |
|
|
653 | 649 |
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO); |
654 | 650 |
acb->hd_aiocb = bdrv_aio_writev(bs->file, |
655 | 651 |
(acb->cluster_offset >> 9) + index_in_cluster, |
... | ... | |
667 | 663 |
QLIST_REMOVE(&acb->l2meta, next_in_flight); |
668 | 664 |
} |
669 | 665 |
done: |
670 |
if (acb->qiov->niov > 1) |
|
671 |
qemu_vfree(acb->orig_buf); |
|
672 | 666 |
acb->common.cb(acb->common.opaque, ret); |
667 |
qemu_iovec_destroy(&acb->hd_qiov); |
|
673 | 668 |
qemu_aio_release(acb); |
674 | 669 |
} |
675 | 670 |
|
Also available in: Unified diff