Revision 095a9c58
--- a/block-qcow2.c
+++ b/block-qcow2.c
@@ -52,6 +52,8 @@
 #define QCOW_CRYPT_NONE 0
 #define QCOW_CRYPT_AES 1
 
+#define QCOW_MAX_CRYPT_CLUSTERS 32
+
 /* indicate that the refcount of the referenced cluster is exactly one. */
 #define QCOW_OFLAG_COPIED (1LL << 63)
 /* indicate that the cluster is compressed (they never have the copied flag) */
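The new QCOW_MAX_CRYPT_CLUSTERS constant bounds how many clusters a single encrypted request may touch, because AES I/O is bounced through one preallocated buffer (resized in the next hunk). Below is a minimal sketch of how such a cap clamps a request, mirroring the n_end logic this patch adds to the write paths; the helper name and the values in main() are illustrative, not from the patch.

```c
#include <stdio.h>

#define QCOW_MAX_CRYPT_CLUSTERS 32

/* Cap the last sector index of a request so that, with encryption on,
 * it never spans more than QCOW_MAX_CRYPT_CLUSTERS clusters. */
static int clamp_crypt_sectors(int index_in_cluster, int nb_sectors,
                               int cluster_sectors, int crypt_method)
{
    int n_end = index_in_cluster + nb_sectors;

    if (crypt_method &&
        n_end > QCOW_MAX_CRYPT_CLUSTERS * cluster_sectors)
        n_end = QCOW_MAX_CRYPT_CLUSTERS * cluster_sectors;
    return n_end;
}

int main(void)
{
    /* 64 KiB clusters (128 sectors), AES enabled, oversized request */
    printf("%d\n", clamp_crypt_sectors(5, 10000, 128, 1)); /* prints 4096 */
    return 0;
}
```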
@@ -263,7 +265,8 @@
     if (!s->cluster_cache)
         goto fail;
     /* one more sector for decompressed data alignment */
-    s->cluster_data = qemu_malloc(s->cluster_size + 512);
+    s->cluster_data = qemu_malloc(QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size
+                                  + 512);
     if (!s->cluster_data)
         goto fail;
     s->cluster_cache_offset = -1;
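With encryption enabled, cluster_data becomes the bounce buffer for a whole multi-cluster request, so it grows from one cluster plus one alignment sector to QCOW_MAX_CRYPT_CLUSTERS of them. A quick worked example of the sizing, assuming 64 KiB clusters (the figures are illustrative):

```c
#include <stdio.h>

#define QCOW_MAX_CRYPT_CLUSTERS 32

int main(void)
{
    size_t cluster_size = 64 * 1024;   /* assumed 64 KiB clusters */

    /* before: one cluster + one sector for decompression alignment */
    printf("old: %zu bytes\n", cluster_size + 512);           /* 66048 */
    /* after: room for a full 32-cluster encrypted request */
    printf("new: %zu bytes\n",
           QCOW_MAX_CRYPT_CLUSTERS * cluster_size + 512);     /* 2097664 */
    return 0;
}
```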
@@ -612,43 +615,98 @@
  * For a given offset of the disk image, return cluster offset in
  * qcow2 file.
  *
+ * on entry, *num is the number of contiguous clusters we'd like to
+ * access following offset.
+ *
+ * on exit, *num is the number of contiguous clusters we can read.
+ *
  * Return 1, if the offset is found
  * Return 0, otherwise.
  *
  */
 
-static uint64_t get_cluster_offset(BlockDriverState *bs, uint64_t offset)
+static uint64_t get_cluster_offset(BlockDriverState *bs,
+                                   uint64_t offset, int *num)
 {
     BDRVQcowState *s = bs->opaque;
     int l1_index, l2_index;
-    uint64_t l2_offset, *l2_table, cluster_offset;
+    uint64_t l2_offset, *l2_table, cluster_offset, next;
+    int l1_bits;
+    int index_in_cluster, nb_available, nb_needed;
+
+    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
+    nb_needed = *num + index_in_cluster;
+
+    l1_bits = s->l2_bits + s->cluster_bits;
+
+    /* compute how many bytes there are between the offset and
+     * and the end of the l1 entry
+     */
+
+    nb_available = (1 << l1_bits) - (offset & ((1 << l1_bits) - 1));
+
+    /* compute the number of available sectors */
+
+    nb_available = (nb_available >> 9) + index_in_cluster;
+
+    cluster_offset = 0;
 
     /* seek the the l2 offset in the l1 table */
 
-    l1_index = offset >> (s->l2_bits + s->cluster_bits);
+    l1_index = offset >> l1_bits;
     if (l1_index >= s->l1_size)
-        return 0;
+        goto out;
 
     l2_offset = s->l1_table[l1_index];
 
     /* seek the l2 table of the given l2 offset */
 
     if (!l2_offset)
-        return 0;
+        goto out;
 
     /* load the l2 table in memory */
 
     l2_offset &= ~QCOW_OFLAG_COPIED;
     l2_table = l2_load(bs, l2_offset);
     if (l2_table == NULL)
-        return 0;
+        goto out;
 
     /* find the cluster offset for the given disk offset */
 
     l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
     cluster_offset = be64_to_cpu(l2_table[l2_index]);
+    nb_available = s->cluster_sectors;
+    l2_index++;
+
+    if (!cluster_offset) {
 
-    return cluster_offset & ~QCOW_OFLAG_COPIED;
+        /* how many empty clusters ? */
+
+        while (nb_available < nb_needed && !l2_table[l2_index]) {
+            l2_index++;
+            nb_available += s->cluster_sectors;
+        }
+    } else {
+
+        /* how many allocated clusters ? */
+
+        cluster_offset &= ~QCOW_OFLAG_COPIED;
+        while (nb_available < nb_needed) {
+            next = be64_to_cpu(l2_table[l2_index]) & ~QCOW_OFLAG_COPIED;
+            if (next != cluster_offset + (nb_available << 9))
+                break;
+            l2_index++;
+            nb_available += s->cluster_sectors;
+        }
+    }
+
+out:
+    if (nb_available > nb_needed)
+        nb_available = nb_needed;
+
+    *num = nb_available - index_in_cluster;
+
+    return cluster_offset;
 }
 
 /*
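get_cluster_offset() now reports through *num how many of the requested sectors sit in physically contiguous clusters, by walking the L2 table until the mapping stops being sequential. The sketch below isolates that scan; it assumes the entries were already byte-swapped with be64_to_cpu(), and the function name and sample offsets are illustrative, not from the patch.

```c
#include <stdint.h>
#include <stdio.h>

#define QCOW_OFLAG_COPIED (1ULL << 63)

/* Count how many consecutive guest clusters map to physically
 * consecutive host clusters, starting at l2[0]. */
static int count_contiguous(const uint64_t *l2, int nb, uint64_t cluster_size)
{
    uint64_t first = l2[0] & ~QCOW_OFLAG_COPIED;
    int i;

    for (i = 1; i < nb; i++) {
        uint64_t next = l2[i] & ~QCOW_OFLAG_COPIED;
        if (next != first + i * cluster_size)
            break; /* the physical run ends here */
    }
    return i;
}

int main(void)
{
    /* three contiguous 64 KiB clusters, then a gap */
    uint64_t l2[] = { 0x10000, 0x20000, 0x30000, 0x90000 };
    printf("%d\n", count_contiguous(l2, 4, 0x10000)); /* prints 3 */
    return 0;
}
```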
@@ -659,13 +717,10 @@
  */
 
 static void free_any_clusters(BlockDriverState *bs,
-                              uint64_t cluster_offset)
+                              uint64_t cluster_offset, int nb_clusters)
 {
     BDRVQcowState *s = bs->opaque;
 
-    if (cluster_offset == 0)
-        return;
-
     /* free the cluster */
 
     if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
@@ -677,7 +732,9 @@
         return;
     }
 
-    free_clusters(bs, cluster_offset, s->cluster_size);
+    free_clusters(bs, cluster_offset, nb_clusters << s->cluster_bits);
+
+    return;
 }
 
 /*
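free_any_clusters() now receives the length of the run in clusters and converts it to bytes with a shift, as in free_clusters(bs, cluster_offset, nb_clusters << s->cluster_bits). A small worked example of that unit arithmetic, assuming 64 KiB clusters (the constants are illustrative):

```c
#include <stdio.h>

int main(void)
{
    int cluster_bits = 16;                         /* 64 KiB clusters */
    int cluster_sectors = 1 << (cluster_bits - 9); /* 512-byte sectors each */
    int nb_clusters = 3;

    printf("bytes to free: %d\n", nb_clusters << cluster_bits);   /* 196608 */
    printf("sectors covered: %d\n", nb_clusters * cluster_sectors); /* 384 */
    return 0;
}
```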
@@ -768,7 +825,8 @@
     if (cluster_offset & QCOW_OFLAG_COPIED)
         return cluster_offset & ~QCOW_OFLAG_COPIED;
 
-    free_any_clusters(bs, cluster_offset);
+    if (cluster_offset)
+        free_any_clusters(bs, cluster_offset, 1);
 
     cluster_offset = alloc_bytes(bs, compressed_size);
     nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
@@ -806,68 +864,136 @@
 
 static uint64_t alloc_cluster_offset(BlockDriverState *bs,
                                      uint64_t offset,
-                                     int n_start, int n_end)
+                                     int n_start, int n_end,
+                                     int *num)
 {
     BDRVQcowState *s = bs->opaque;
     int l2_index, ret;
     uint64_t l2_offset, *l2_table, cluster_offset;
+    int nb_available, nb_clusters, i;
+    uint64_t start_sect, current;
 
     ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
     if (ret == 0)
         return 0;
 
+    nb_clusters = ((n_end << 9) + s->cluster_size - 1) >>
+                  s->cluster_bits;
+    if (nb_clusters > s->l2_size - l2_index)
+        nb_clusters = s->l2_size - l2_index;
+
     cluster_offset = be64_to_cpu(l2_table[l2_index]);
-    if (cluster_offset & QCOW_OFLAG_COPIED)
-        return cluster_offset & ~QCOW_OFLAG_COPIED;
 
-    free_any_clusters(bs, cluster_offset);
+    /* We keep all QCOW_OFLAG_COPIED clusters */
+
+    if (cluster_offset & QCOW_OFLAG_COPIED) {
+
+        for (i = 1; i < nb_clusters; i++) {
+            current = be64_to_cpu(l2_table[l2_index + i]);
+            if (cluster_offset + (i << s->cluster_bits) != current)
+                break;
+        }
+        nb_clusters = i;
+
+        nb_available = nb_clusters << (s->cluster_bits - 9);
+        if (nb_available > n_end)
+            nb_available = n_end;
+
+        cluster_offset &= ~QCOW_OFLAG_COPIED;
+
+        goto out;
+    }
+
+    /* for the moment, multiple compressed clusters are not managed */
+
+    if (cluster_offset & QCOW_OFLAG_COMPRESSED)
+        nb_clusters = 1;
+
+    /* how many empty or how many to free ? */
+
+    if (!cluster_offset) {
+
+        /* how many free clusters ? */
+
+        i = 1;
+        while (i < nb_clusters &&
+               l2_table[l2_index + i] == 0) {
+            i++;
+        }
+        nb_clusters = i;
+
+    } else {
+
+        /* how many contiguous clusters ? */
+
+        for (i = 1; i < nb_clusters; i++) {
+            current = be64_to_cpu(l2_table[l2_index + i]);
+            if (cluster_offset + (i << s->cluster_bits) != current)
+                break;
+        }
+        nb_clusters = i;
+
+        free_any_clusters(bs, cluster_offset, i);
+    }
 
     /* allocate a new cluster */
 
-    cluster_offset = alloc_clusters(bs, s->cluster_size);
+    cluster_offset = alloc_clusters(bs, nb_clusters * s->cluster_size);
 
     /* we must initialize the cluster content which won't be
        written */
 
-    if ((n_end - n_start) < s->cluster_sectors) {
-        uint64_t start_sect;
+    nb_available = nb_clusters << (s->cluster_bits - 9);
+    if (nb_available > n_end)
+        nb_available = n_end;
 
-        start_sect = (offset & ~(s->cluster_size - 1)) >> 9;
-        ret = copy_sectors(bs, start_sect,
-                           cluster_offset, 0, n_start);
+    /* copy content of unmodified sectors */
+
+    start_sect = (offset & ~(s->cluster_size - 1)) >> 9;
+    if (n_start) {
+        ret = copy_sectors(bs, start_sect, cluster_offset, 0, n_start);
         if (ret < 0)
             return 0;
-        ret = copy_sectors(bs, start_sect,
-                           cluster_offset, n_end, s->cluster_sectors);
+    }
+
+    if (nb_available & (s->cluster_sectors - 1)) {
+        uint64_t end = nb_available & ~(uint64_t)(s->cluster_sectors - 1);
+        ret = copy_sectors(bs, start_sect + end,
+                           cluster_offset + (end << 9),
+                           nb_available - end,
+                           s->cluster_sectors);
         if (ret < 0)
             return 0;
     }
 
     /* update L2 table */
 
-    l2_table[l2_index] = cpu_to_be64(cluster_offset | QCOW_OFLAG_COPIED);
+    for (i = 0; i < nb_clusters; i++)
+        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
+                                              (i << s->cluster_bits)) |
+                                             QCOW_OFLAG_COPIED);
+
     if (bdrv_pwrite(s->hd,
                     l2_offset + l2_index * sizeof(uint64_t),
                     l2_table + l2_index,
-                    sizeof(uint64_t)) != sizeof(uint64_t))
+                    nb_clusters * sizeof(uint64_t)) !=
+                    nb_clusters * sizeof(uint64_t))
         return 0;
 
+out:
+    *num = nb_available - n_start;
+
     return cluster_offset;
 }
 
 static int qcow_is_allocated(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors, int *pnum)
 {
-    BDRVQcowState *s = bs->opaque;
-    int index_in_cluster, n;
     uint64_t cluster_offset;
 
-    cluster_offset = get_cluster_offset(bs, sector_num << 9);
-    index_in_cluster = sector_num & (s->cluster_sectors - 1);
-    n = s->cluster_sectors - index_in_cluster;
-    if (n > nb_sectors)
-        n = nb_sectors;
-    *pnum = n;
+    *pnum = nb_sectors;
+    cluster_offset = get_cluster_offset(bs, sector_num << 9, pnum);
+
     return (cluster_offset != 0);
 }
 
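alloc_cluster_offset() now sizes a whole run up front: it rounds the requested sectors up to clusters, then clamps the run to the end of the current L2 table, since the single bdrv_pwrite() at the bottom can only update entries of that one table. A sketch of that sizing step; the parameter values in main() are illustrative, not from the patch.

```c
#include <stdio.h>

/* Round a sector count up to whole clusters, then clamp the run so it
 * never crosses the end of the current L2 table. */
static int size_cluster_run(int n_end, int cluster_bits, int cluster_size,
                            int l2_size, int l2_index)
{
    int nb_clusters = ((n_end << 9) + cluster_size - 1) >> cluster_bits;

    if (nb_clusters > l2_size - l2_index)
        nb_clusters = l2_size - l2_index;
    return nb_clusters;
}

int main(void)
{
    /* 300 sectors round up to 3 clusters of 64 KiB, but only 2 L2
     * entries remain in this table, so the run is clamped to 2 */
    printf("%d\n", size_cluster_run(300, 16, 1 << 16, 8192, 8190));
    return 0;
}
```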
@@ -944,11 +1070,9 @@
     uint64_t cluster_offset;
 
     while (nb_sectors > 0) {
-        cluster_offset = get_cluster_offset(bs, sector_num << 9);
+        n = nb_sectors;
+        cluster_offset = get_cluster_offset(bs, sector_num << 9, &n);
         index_in_cluster = sector_num & (s->cluster_sectors - 1);
-        n = s->cluster_sectors - index_in_cluster;
-        if (n > nb_sectors)
-            n = nb_sectors;
         if (!cluster_offset) {
             if (bs->backing_hd) {
                 /* read from the base image */
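The synchronous read loop no longer clamps each iteration to one cluster; it asks get_cluster_offset() for the full remainder and then advances by however many sectors came back contiguous. A toy version of that loop shape, with a hypothetical lookup() standing in for the real function:

```c
#include <stdio.h>

/* pretend lookup: at most 4 "sectors" are contiguous per call */
static int lookup(int sector, int *num)
{
    (void)sector;
    if (*num > 4)
        *num = 4;
    return 1;
}

int main(void)
{
    int sector = 0, remaining = 10;

    while (remaining > 0) {
        int n = remaining;   /* ask for the full request */
        lookup(sector, &n);  /* n is clamped to the contiguous run */
        printf("process %d sectors at %d\n", n, sector);
        sector += n;
        remaining -= n;
    }
    return 0;
}
```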
@@ -987,15 +1111,17 @@
     BDRVQcowState *s = bs->opaque;
     int ret, index_in_cluster, n;
     uint64_t cluster_offset;
+    int n_end;
 
     while (nb_sectors > 0) {
         index_in_cluster = sector_num & (s->cluster_sectors - 1);
-        n = s->cluster_sectors - index_in_cluster;
-        if (n > nb_sectors)
-            n = nb_sectors;
+        n_end = index_in_cluster + nb_sectors;
+        if (s->crypt_method &&
+            n_end > QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors)
+            n_end = QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors;
         cluster_offset = alloc_cluster_offset(bs, sector_num << 9,
                                               index_in_cluster,
-                                              index_in_cluster + n);
+                                              n_end, &n);
         if (!cluster_offset)
             return -1;
         if (s->crypt_method) {
@@ -1068,11 +1194,9 @@
     }
 
     /* prepare next AIO request */
-    acb->cluster_offset = get_cluster_offset(bs, acb->sector_num << 9);
+    acb->n = acb->nb_sectors;
+    acb->cluster_offset = get_cluster_offset(bs, acb->sector_num << 9, &acb->n);
     index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
-    acb->n = s->cluster_sectors - index_in_cluster;
-    if (acb->n > acb->nb_sectors)
-        acb->n = acb->nb_sectors;
 
     if (!acb->cluster_offset) {
         if (bs->backing_hd) {
@@ -1152,6 +1276,7 @@
     int index_in_cluster;
     uint64_t cluster_offset;
     const uint8_t *src_buf;
+    int n_end;
 
     acb->hd_aiocb = NULL;
 
@@ -1174,19 +1299,22 @@
     }
 
     index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
-    acb->n = s->cluster_sectors - index_in_cluster;
-    if (acb->n > acb->nb_sectors)
-        acb->n = acb->nb_sectors;
+    n_end = index_in_cluster + acb->nb_sectors;
+    if (s->crypt_method &&
+        n_end > QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors)
+        n_end = QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors;
+
     cluster_offset = alloc_cluster_offset(bs, acb->sector_num << 9,
                                           index_in_cluster,
-                                          index_in_cluster + acb->n);
+                                          n_end, &acb->n);
     if (!cluster_offset || (cluster_offset & 511) != 0) {
         ret = -EIO;
         goto fail;
     }
     if (s->crypt_method) {
         if (!acb->cluster_data) {
-            acb->cluster_data = qemu_mallocz(s->cluster_size);
+            acb->cluster_data = qemu_mallocz(QCOW_MAX_CRYPT_CLUSTERS *
+                                             s->cluster_size);
             if (!acb->cluster_data) {
                 ret = -ENOMEM;
                 goto fail;