Revision 5ea929e3 block/qcow2-cluster.c
b/block/qcow2-cluster.c | ||
---|---|---|
888 | 888 |
} |
889 | 889 |
return 0; |
890 | 890 |
} |
891 |
|
|
/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of discarded
 * clusters.
 *
 * @bs:          the block driver state of the qcow2 image
 * @offset:      guest offset of the first cluster to discard (cluster-aligned)
 * @nb_clusters: requested number of clusters; clamped so the work never
 *               crosses into the next L2 table
 *
 * Returns the number of clusters handled (> 0), or a negative errno from
 * the cluster-table lookup / cache writeback.
 */
static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t l2_offset, *l2_table;
    int l2_index;
    int ret;
    int i;

    /* Fetch (and pin in the L2 cache) the table covering 'offset';
     * l2_table/l2_index then address the first entry to process. */
    ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);
        /* Strip the COPIED flag so the comparison below sees the raw
         * cluster offset; zero then means the entry is unallocated and
         * there is nothing to discard. */
        old_offset &= ~QCOW_OFLAG_COPIED;

        if (old_offset == 0) {
            continue;
        }

        /* First remove L2 entries */
        /* NOTE(review): presumably this ordering is for crash consistency —
         * a crash between the two steps leaks a cluster rather than leaving
         * an L2 entry pointing at freed data. Confirm against the qcow2
         * metadata-update rules. */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        l2_table[l2_index + i] = cpu_to_be64(0);

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_offset, 1);
    }

    /* Unpin the cached L2 table; the put may fail on writeback. */
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* Number of entries processed in this table (including ones that were
     * already unallocated and merely skipped). */
    return nb_clusters;
}
|
939 |
|
|
940 |
int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset, |
|
941 |
int nb_sectors) |
|
942 |
{ |
|
943 |
BDRVQcowState *s = bs->opaque; |
|
944 |
uint64_t end_offset; |
|
945 |
unsigned int nb_clusters; |
|
946 |
int ret; |
|
947 |
|
|
948 |
end_offset = offset + (nb_sectors << BDRV_SECTOR_BITS); |
|
949 |
|
|
950 |
/* Round start up and end down */ |
|
951 |
offset = align_offset(offset, s->cluster_size); |
|
952 |
end_offset &= ~(s->cluster_size - 1); |
|
953 |
|
|
954 |
if (offset > end_offset) { |
|
955 |
return 0; |
|
956 |
} |
|
957 |
|
|
958 |
nb_clusters = size_to_clusters(s, end_offset - offset); |
|
959 |
|
|
960 |
/* Each L2 table is handled by its own loop iteration */ |
|
961 |
while (nb_clusters > 0) { |
|
962 |
ret = discard_single_l2(bs, offset, nb_clusters); |
|
963 |
if (ret < 0) { |
|
964 |
return ret; |
|
965 |
} |
|
966 |
|
|
967 |
nb_clusters -= ret; |
|
968 |
offset += (ret * s->cluster_size); |
|
969 |
} |
|
970 |
|
|
971 |
return 0; |
|
972 |
} |
Also available in: Unified diff