Revision 593fb83c block/qcow2-cluster.c
Diff for b/block/qcow2-cluster.c
615 | 615 |
return cluster_offset; |
616 | 616 |
} |
617 | 617 |
|
618 |
/*
 * Copy-on-write one region of an in-flight cluster allocation.
 *
 * Copies the unmodified guest data described by @r from its current
 * location (relative to m->offset) into the newly allocated cluster(s)
 * at m->alloc_offset, so that the L2 table can later be pointed at the
 * new cluster without losing data outside the written range.
 *
 * Called with s->lock held; the lock is dropped around the actual data
 * copy because copy_sectors() may block on I/O.
 *
 * Returns 0 on success (including the trivial empty-region case) or a
 * negative value on error, as returned by copy_sectors().
 */
static int perform_cow(BlockDriverState *bs, QCowL2Meta *m, Qcow2COWRegion *r)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    /* An empty COW region needs no work */
    if (r->nb_sectors == 0) {
        return 0;
    }

    /* Release the lock across the (potentially blocking) sector copy */
    qemu_co_mutex_unlock(&s->lock);
    ret = copy_sectors(bs, m->offset / BDRV_SECTOR_SIZE, m->alloc_offset,
                       r->offset / BDRV_SECTOR_SIZE,
                       r->offset / BDRV_SECTOR_SIZE + r->nb_sectors);
    qemu_co_mutex_lock(&s->lock);

    if (ret < 0) {
        return ret;
    }

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    qcow2_cache_depends_on_flush(s->l2_table_cache);

    return 0;
}
|
646 |
|
|
618 | 647 |
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m) |
619 | 648 |
{ |
620 | 649 |
BDRVQcowState *s = bs->opaque; |
621 | 650 |
int i, j = 0, l2_index, ret; |
622 |
uint64_t *old_cluster, start_sect, *l2_table;
|
|
651 |
uint64_t *old_cluster, *l2_table; |
|
623 | 652 |
uint64_t cluster_offset = m->alloc_offset; |
624 |
bool cow = false; |
|
625 | 653 |
|
626 | 654 |
trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters); |
627 | 655 |
|
... | ... | |
631 | 659 |
old_cluster = g_malloc(m->nb_clusters * sizeof(uint64_t)); |
632 | 660 |
|
633 | 661 |
/* copy content of unmodified sectors */ |
634 |
start_sect = m->offset >> 9; |
|
635 |
if (m->n_start) { |
|
636 |
cow = true; |
|
637 |
qemu_co_mutex_unlock(&s->lock); |
|
638 |
ret = copy_sectors(bs, start_sect, cluster_offset, 0, m->n_start); |
|
639 |
qemu_co_mutex_lock(&s->lock); |
|
640 |
if (ret < 0) |
|
641 |
goto err; |
|
662 |
ret = perform_cow(bs, m, &m->cow_start); |
|
663 |
if (ret < 0) { |
|
664 |
goto err; |
|
642 | 665 |
} |
643 | 666 |
|
644 |
if (m->nb_available & (s->cluster_sectors - 1)) { |
|
645 |
cow = true; |
|
646 |
qemu_co_mutex_unlock(&s->lock); |
|
647 |
ret = copy_sectors(bs, start_sect, cluster_offset, m->nb_available, |
|
648 |
align_offset(m->nb_available, s->cluster_sectors)); |
|
649 |
qemu_co_mutex_lock(&s->lock); |
|
650 |
if (ret < 0) |
|
651 |
goto err; |
|
667 |
ret = perform_cow(bs, m, &m->cow_end); |
|
668 |
if (ret < 0) { |
|
669 |
goto err; |
|
652 | 670 |
} |
653 | 671 |
|
654 |
/* |
|
655 |
* Update L2 table. |
|
656 |
* |
|
657 |
* Before we update the L2 table to actually point to the new cluster, we |
|
658 |
* need to be sure that the refcounts have been increased and COW was |
|
659 |
* handled. |
|
660 |
*/ |
|
661 |
if (cow) { |
|
662 |
qcow2_cache_depends_on_flush(s->l2_table_cache); |
|
663 |
} |
|
672 |
/* Update L2 table. */ |
|
664 | 673 |
|
665 | 674 |
if (qcow2_need_accurate_refcounts(s)) { |
666 | 675 |
qcow2_cache_set_dependency(bs, s->l2_table_cache, |
... | ... | |
957 | 966 |
* |
958 | 967 |
* avail_sectors: Number of sectors from the start of the first |
959 | 968 |
* newly allocated to the end of the last newly allocated cluster. |
969 |
* |
|
970 |
* nb_sectors: The number of sectors from the start of the first |
|
971 |
* newly allocated cluster to the end of the aread that the write |
|
972 |
* request actually writes to (excluding COW at the end) |
|
960 | 973 |
*/ |
961 | 974 |
int requested_sectors = n_end - keep_clusters * s->cluster_sectors; |
962 | 975 |
int avail_sectors = nb_clusters |
963 | 976 |
<< (s->cluster_bits - BDRV_SECTOR_BITS); |
977 |
int alloc_n_start = keep_clusters == 0 ? n_start : 0; |
|
978 |
int nb_sectors = MIN(requested_sectors, avail_sectors); |
|
964 | 979 |
|
965 | 980 |
*m = (QCowL2Meta) { |
966 | 981 |
.cluster_offset = keep_clusters == 0 ? |
967 | 982 |
alloc_cluster_offset : cluster_offset, |
968 | 983 |
.alloc_offset = alloc_cluster_offset, |
969 | 984 |
.offset = alloc_offset & ~(s->cluster_size - 1), |
970 |
.n_start = keep_clusters == 0 ? n_start : 0, |
|
971 | 985 |
.nb_clusters = nb_clusters, |
972 |
.nb_available = MIN(requested_sectors, avail_sectors), |
|
986 |
.nb_available = nb_sectors, |
|
987 |
|
|
988 |
.cow_start = { |
|
989 |
.offset = 0, |
|
990 |
.nb_sectors = alloc_n_start, |
|
991 |
}, |
|
992 |
.cow_end = { |
|
993 |
.offset = nb_sectors * BDRV_SECTOR_SIZE, |
|
994 |
.nb_sectors = avail_sectors - nb_sectors, |
|
995 |
}, |
|
973 | 996 |
}; |
974 | 997 |
qemu_co_queue_init(&m->dependent_requests); |
975 | 998 |
QLIST_INSERT_HEAD(&s->cluster_allocs, m, next_in_flight); |
Also available in: Unified diff