Revision 6db6c638
--- a/block-qcow2.c
+++ b/block-qcow2.c
@@ -601,6 +601,34 @@
     return l2_table;
 }
 
+static int size_to_clusters(BDRVQcowState *s, int64_t size)
+{
+    return (size + (s->cluster_size - 1)) >> s->cluster_bits;
+}
+
+static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
+        uint64_t *l2_table, uint64_t mask)
+{
+    int i;
+    uint64_t offset = be64_to_cpu(l2_table[0]) & ~mask;
+
+    for (i = 0; i < nb_clusters; i++)
+        if (offset + i * cluster_size != (be64_to_cpu(l2_table[i]) & ~mask))
+            break;
+
+    return i;
+}
+
+static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
+{
+    int i = 0;
+
+    while(nb_clusters-- && l2_table[i] == 0)
+        i++;
+
+    return i;
+}
+
 /*
  * get_cluster_offset
  *
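The three helpers introduced above centralize counting logic that the rest of the revision substitutes for open-coded loops: size_to_clusters() rounds a byte count up to whole clusters, count_contiguous_clusters() measures a run of physically contiguous L2 entries while ignoring the flag bits covered by mask, and count_contiguous_free_clusters() measures a run of unallocated (all-zero) entries. A minimal standalone sketch of the same arithmetic, compilable outside QEMU (the cluster size and L2 contents are invented; glibc's htobe64/be64toh stand in for QEMU's cpu_to_be64/be64_to_cpu):

    /* Standalone model of the new helpers; not QEMU code. */
    #include <stdint.h>
    #include <stdio.h>
    #include <endian.h>                 /* htobe64/be64toh (glibc) */

    #define CLUSTER_BITS 16             /* 64 KiB clusters, the qcow2 default */
    #define CLUSTER_SIZE (1 << CLUSTER_BITS)

    /* Same round-up as size_to_clusters(): bytes -> clusters. */
    static int size_to_clusters(int64_t size)
    {
        return (size + (CLUSTER_SIZE - 1)) >> CLUSTER_BITS;
    }

    int main(void)
    {
        /* L2 fragment: three physically contiguous clusters, then a gap. */
        uint64_t l2[4] = {
            htobe64(0x10000), htobe64(0x20000), htobe64(0x30000), htobe64(0x90000)
        };
        uint64_t first = be64toh(l2[0]);
        int i;

        /* Loop body of count_contiguous_clusters() with mask == 0. */
        for (i = 0; i < 4; i++)
            if (first + (uint64_t)i * CLUSTER_SIZE != be64toh(l2[i]))
                break;

        printf("contiguous run: %d clusters\n", i);                 /* 3 */
        printf("1 byte -> %d cluster(s)\n", size_to_clusters(1));   /* 1 */
        printf("64 KiB + 1 byte -> %d cluster(s)\n",
               size_to_clusters(CLUSTER_SIZE + 1));                 /* 2 */
        return 0;
    }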
@@ -622,9 +650,9 @@
 {
     BDRVQcowState *s = bs->opaque;
     int l1_index, l2_index;
-    uint64_t l2_offset, *l2_table, cluster_offset, next;
-    int l1_bits;
-    int index_in_cluster, nb_available, nb_needed;
+    uint64_t l2_offset, *l2_table, cluster_offset;
+    int l1_bits, c;
+    int index_in_cluster, nb_available, nb_needed, nb_clusters;
 
     index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
     nb_needed = *num + index_in_cluster;
@@ -632,7 +660,7 @@
     l1_bits = s->l2_bits + s->cluster_bits;
 
     /* compute how many bytes there are between the offset and
-     * and the end of the l1 entry
+     * the end of the l1 entry
      */
 
     nb_available = (1 << l1_bits) - (offset & ((1 << l1_bits) - 1));
@@ -667,38 +695,25 @@
 
     l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
     cluster_offset = be64_to_cpu(l2_table[l2_index]);
-    nb_available = s->cluster_sectors;
-    l2_index++;
+    nb_clusters = size_to_clusters(s, nb_needed << 9);
 
     if (!cluster_offset) {
-
-        /* how many empty clusters ? */
-
-        while (nb_available < nb_needed && !l2_table[l2_index]) {
-            l2_index++;
-            nb_available += s->cluster_sectors;
-        }
+        /* how many empty clusters ? */
+        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
     } else {
+        /* how many allocated clusters ? */
+        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
+                &l2_table[l2_index], QCOW_OFLAG_COPIED);
+    }
 
-        /* how many allocated clusters ? */
-
-        cluster_offset &= ~QCOW_OFLAG_COPIED;
-        while (nb_available < nb_needed) {
-            next = be64_to_cpu(l2_table[l2_index]) & ~QCOW_OFLAG_COPIED;
-            if (next != cluster_offset + (nb_available << 9))
-                break;
-            l2_index++;
-            nb_available += s->cluster_sectors;
-        }
-    }
-
+    nb_available = (c * s->cluster_sectors);
 out:
     if (nb_available > nb_needed)
         nb_available = nb_needed;
 
     *num = nb_available - index_in_cluster;
 
-    return cluster_offset;
+    return cluster_offset & ~QCOW_OFLAG_COPIED;
 }
 
 /*
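With the helpers in place, get_cluster_offset() no longer advances sector by sector: it rounds the request up to whole clusters once, lets one of the counters return the length c of the run, and converts back to sectors as c * s->cluster_sectors. A worked example of that sector accounting, with invented numbers (512-byte sectors, 64 KiB clusters, and an assumed run length of two clusters):

    #include <stdio.h>

    int main(void)
    {
        int cluster_sectors = 128;    /* 64 KiB / 512-byte sectors */
        int offset_sector   = 130;    /* guest sector where the request starts */
        int num             = 300;    /* sectors the caller asked for */

        int index_in_cluster = offset_sector & (cluster_sectors - 1);      /* 2 */
        int nb_needed = num + index_in_cluster;                            /* 302 */
        /* size_to_clusters(s, nb_needed << 9): round sectors up to clusters */
        int nb_clusters = (nb_needed + cluster_sectors - 1) / cluster_sectors;

        /* Suppose the counter found c == 2 contiguous clusters. */
        int c = 2;
        int nb_available = c * cluster_sectors;                            /* 256 */
        if (nb_available > nb_needed)
            nb_available = nb_needed;

        printf("request spans %d cluster(s)\n", nb_clusters);              /* 3 */
        printf("*num = %d sectors\n", nb_available - index_in_cluster);    /* 254 */
        return 0;
    }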
@@ -862,15 +877,15 @@
     BDRVQcowState *s = bs->opaque;
     int l2_index, ret;
     uint64_t l2_offset, *l2_table, cluster_offset;
-    int nb_available, nb_clusters, i, j;
-    uint64_t start_sect, current;
+    int nb_available, nb_clusters, i = 0;
+    uint64_t start_sect;
 
     ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
     if (ret == 0)
         return 0;
 
-    nb_clusters = ((n_end << 9) + s->cluster_size - 1) >>
-        s->cluster_bits;
+    nb_clusters = size_to_clusters(s, n_end << 9);
+
     if (nb_clusters > s->l2_size - l2_index)
         nb_clusters = s->l2_size - l2_index;
 
@@ -879,13 +894,8 @@
     /* We keep all QCOW_OFLAG_COPIED clusters */
 
     if (cluster_offset & QCOW_OFLAG_COPIED) {
-
-        for (i = 1; i < nb_clusters; i++) {
-            current = be64_to_cpu(l2_table[l2_index + i]);
-            if (cluster_offset + (i << s->cluster_bits) != current)
-                break;
-        }
-        nb_clusters = i;
+        nb_clusters = count_contiguous_clusters(nb_clusters, s->cluster_size,
+                &l2_table[l2_index], 0);
 
         nb_available = nb_clusters << (s->cluster_bits - 9);
         if (nb_available > n_end)
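In this hunk the mask argument does real work: alloc_cluster_offset() passes 0, so any flag bit ends the run, whereas get_cluster_offset() above passes QCOW_OFLAG_COPIED, so a run may span entries that differ only in that bit. A standalone illustration (offsets invented; in qcow2, QCOW_OFLAG_COPIED is bit 63 of an L2 entry):

    #include <stdint.h>
    #include <stdio.h>
    #include <endian.h>

    #define OFLAG_COPIED (1ULL << 63)   /* stand-in for QCOW_OFLAG_COPIED */
    #define CLUSTER_SIZE 0x10000

    static int count_contiguous(int n, const uint64_t *l2, uint64_t mask)
    {
        uint64_t offset = be64toh(l2[0]) & ~mask;
        int i;

        for (i = 0; i < n; i++)
            if (offset + (uint64_t)i * CLUSTER_SIZE != (be64toh(l2[i]) & ~mask))
                break;
        return i;
    }

    int main(void)
    {
        /* Three contiguous clusters; the first and last also have COPIED set. */
        uint64_t l2[3] = {
            htobe64(0x10000 | OFLAG_COPIED),
            htobe64(0x20000),
            htobe64(0x30000 | OFLAG_COPIED),
        };
        printf("mask=COPIED: run of %d\n", count_contiguous(3, l2, OFLAG_COPIED)); /* 3 */
        printf("mask=0:      run of %d\n", count_contiguous(3, l2, 0));            /* 1 */
        return 0;
    }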
@@ -903,46 +913,27 @@
 
     /* how many available clusters ? */
 
-    i = 0;
     while (i < nb_clusters) {
+        int j;
+        i += count_contiguous_free_clusters(nb_clusters - i,
+                &l2_table[l2_index + i]);
 
-        i++;
-
-        if (!cluster_offset) {
-
-            /* how many free clusters ? */
-
-            while (i < nb_clusters) {
-                cluster_offset = be64_to_cpu(l2_table[l2_index + i]);
-                if (cluster_offset != 0)
-                    break;
-                i++;
-            }
+        cluster_offset = be64_to_cpu(l2_table[l2_index + i]);
 
-            if ((cluster_offset & QCOW_OFLAG_COPIED) ||
+        if ((cluster_offset & QCOW_OFLAG_COPIED) ||
             (cluster_offset & QCOW_OFLAG_COMPRESSED))
-                break;
-
-        } else {
+            break;
 
-            /* how many contiguous clusters ? */
+        j = count_contiguous_clusters(nb_clusters - i, s->cluster_size,
+                &l2_table[l2_index + i], 0);
 
-            j = 1;
-            current = 0;
-            while (i < nb_clusters) {
-                current = be64_to_cpu(l2_table[l2_index + i]);
-                if (cluster_offset + (j << s->cluster_bits) != current)
-                    break;
+        if (j)
+            free_any_clusters(bs, cluster_offset, j);
 
-                i++;
-                j++;
-            }
+        i += j;
 
-            free_any_clusters(bs, cluster_offset, j);
-            if (current)
-                break;
-            cluster_offset = current;
-        }
+        if(be64_to_cpu(l2_table[l2_index + i]))
+            break;
     }
     nb_clusters = i;
 
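The rewritten scan alternates between the two counters: skip a run of free entries, stop if the next entry is COPIED or COMPRESSED, otherwise take a run of contiguous allocated clusters that can be freed and reused, then keep going only while the entry after that run is still free. A toy model of that control flow (table contents invented, byte-swapping dropped, and the free_any_clusters() side effect elided):

    #include <stdint.h>
    #include <stdio.h>

    #define COPIED     (1ULL << 63)
    #define COMPRESSED (1ULL << 62)
    #define CSIZE      0x10000ULL

    static int count_free(int n, const uint64_t *t)
    {
        int i = 0;
        while (n-- && t[i] == 0)
            i++;
        return i;
    }

    static int count_alloc(int n, const uint64_t *t)
    {
        int i;
        for (i = 0; i < n; i++)
            if (t[0] + (uint64_t)i * CSIZE != t[i])
                break;
        return i;
    }

    int main(void)
    {
        /* two free entries, two reusable contiguous clusters, one COPIED */
        uint64_t t[5] = { 0, 0, 0x50000, 0x60000, 0x90000 | COPIED };
        int nb_clusters = 5, i = 0;

        while (i < nb_clusters) {
            i += count_free(nb_clusters - i, &t[i]);
            uint64_t entry = (i < nb_clusters) ? t[i] : 0;
            if ((entry & COPIED) || (entry & COMPRESSED))
                break;
            i += count_alloc(nb_clusters - i, &t[i]);
            if (i < nb_clusters && t[i])    /* next entry allocated: stop */
                break;
        }
        printf("can write %d clusters in one request\n", i);   /* 4 */
        return 0;
    }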
@@ -2194,26 +2185,19 @@
     BDRVQcowState *s = bs->opaque;
     int i, nb_clusters;
 
-    nb_clusters = (size + s->cluster_size - 1) >> s->cluster_bits;
-    for(;;) {
-        if (get_refcount(bs, s->free_cluster_index) == 0) {
-            s->free_cluster_index++;
-            for(i = 1; i < nb_clusters; i++) {
-                if (get_refcount(bs, s->free_cluster_index) != 0)
-                    goto not_found;
-                s->free_cluster_index++;
-            }
+    nb_clusters = size_to_clusters(s, size);
+retry:
+    for(i = 0; i < nb_clusters; i++) {
+        int64_t i = s->free_cluster_index++;
+        if (get_refcount(bs, i) != 0)
+            goto retry;
+    }
 #ifdef DEBUG_ALLOC2
-            printf("alloc_clusters: size=%lld -> %lld\n",
-                    size,
-                    (s->free_cluster_index - nb_clusters) << s->cluster_bits);
+    printf("alloc_clusters: size=%lld -> %lld\n",
+            size,
+            (s->free_cluster_index - nb_clusters) << s->cluster_bits);
 #endif
-            return (s->free_cluster_index - nb_clusters) << s->cluster_bits;
-        } else {
-        not_found:
-            s->free_cluster_index++;
-        }
-    }
+    return (s->free_cluster_index - nb_clusters) << s->cluster_bits;
 }
 
 static int64_t alloc_clusters(BlockDriverState *bs, int64_t size)
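alloc_clusters_noref() drops the for(;;)/not_found bookkeeping in favour of a single cursor plus a retry label: the loop examines nb_clusters consecutive clusters, and any nonzero refcount restarts the window at the cursor's current position. A standalone model with an invented refcount array (the patch's inner int64_t i, which shadows the loop counter, is renamed c here for readability):

    #include <stdio.h>

    static int refcounts[16] = { 1, 1, 0, 1 };   /* remaining entries are 0 */
    static long free_cluster_index = 0;

    static long alloc_clusters_noref(int nb_clusters)
    {
    retry:
        for (int i = 0; i < nb_clusters; i++) {
            long c = free_cluster_index++;
            if (refcounts[c] != 0)
                goto retry;          /* window broken; cursor keeps advancing */
        }
        return free_cluster_index - nb_clusters;   /* first cluster of the run */
    }

    int main(void)
    {
        /* refcounts 0..3 are 1,1,0,1 -> the first free run of 3 starts at 4 */
        printf("run of 3 starts at cluster %ld\n", alloc_clusters_noref(3));
        return 0;
    }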
@@ -2548,7 +2532,7 @@
     uint16_t *refcount_table;
 
     size = bdrv_getlength(s->hd);
-    nb_clusters = (size + s->cluster_size - 1) >> s->cluster_bits;
+    nb_clusters = size_to_clusters(s, size);
     refcount_table = qemu_mallocz(nb_clusters * sizeof(uint16_t));
 
     /* header */
@@ -2600,7 +2584,7 @@
     int refcount;
 
     size = bdrv_getlength(s->hd);
-    nb_clusters = (size + s->cluster_size - 1) >> s->cluster_bits;
+    nb_clusters = size_to_clusters(s, size);
     for(k = 0; k < nb_clusters;) {
         k1 = k;
         refcount = get_refcount(bs, k);
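The final two hunks size the consistency checker's in-memory refcount table the same way; with one uint16_t kept per cluster of the image, the footprint is easy to estimate. A quick arithmetic sketch with an invented image size:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t size         = 10LL << 30;    /* 10 GiB image (invented) */
        int     cluster_bits = 16;            /* 64 KiB clusters */
        int64_t cluster_size = 1LL << cluster_bits;

        /* size_to_clusters(s, size) */
        int64_t nb_clusters = (size + cluster_size - 1) >> cluster_bits;

        printf("%lld clusters -> %lld bytes of uint16_t refcounts\n",
               (long long)nb_clusters,
               (long long)(nb_clusters * (int64_t)sizeof(uint16_t)));
        /* -> 163840 clusters -> 327680 bytes */
        return 0;
    }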