Revision 5fafdf24 block-qcow2.c
b/block-qcow2.c | | |
---|---|---|
1 | 1 |
/* |
2 | 2 |
* Block driver for the QCOW version 2 format |
3 | 3 |
* |
4 | 4 |
* Copyright (c) 2004-2006 Fabrice Bellard |
5 | 5 |
* |
6 | 6 |
* Permission is hereby granted, free of charge, to any person obtaining a copy |
7 | 7 |
* of this software and associated documentation files (the "Software"), to deal |
8 | 8 |
* in the Software without restriction, including without limitation the rights |
... | ... | |
34 | 34 |
- Memory management by reference counts. |
35 | 35 |
- Clusters which have a reference count of one have the bit |
36 | 36 |
QCOW_OFLAG_COPIED to optimize write performance. |
37 | 37 |
- Size of compressed clusters is stored in sectors to reduce bit usage |
38 | 38 |
in the cluster offsets. |
39 | 39 |
- Support for storing additional data (such as the VM state) in the |
40 | 40 |
snapshots. |
41 | 41 |
- If a backing store is used, the cluster size is not constrained |
42 | 42 |
(could be backported to QCOW). |
43 | 43 |
- L2 tables have always a size of one cluster. |
... | ... | |
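The header comment above sums up the on-disk design; the compressed-cluster point in particular is what the csize_shift/csize_mask/cluster_offset_mask fields further down implement. An illustrative decode sketch in C (editorial, not taken from the file; the QCOW_OFLAG_* bit positions and the csize_shift formula are assumptions consistent with this driver):

```c
#include <stdint.h>

/* Assumed values: the top two bits of a 64-bit L2 entry. */
#define QCOW_OFLAG_COPIED     (1LL << 63)
#define QCOW_OFLAG_COMPRESSED (1LL << 62)

/* Decode a compressed-cluster L2 entry the way free_clusters() and
 * decompress_cluster() do below: the sector count (minus one) sits in the
 * bits above csize_shift, the host byte offset in the bits below it. */
static void decode_compressed(uint64_t l2_entry, int cluster_bits,
                              uint64_t *host_offset, int *nb_csectors)
{
    int csize_shift = 62 - (cluster_bits - 8);                /* assumed formula */
    uint64_t csize_mask = (1 << (cluster_bits - 8)) - 1;      /* as in qcow_open() */
    uint64_t cluster_offset_mask = (1LL << csize_shift) - 1;  /* as in qcow_open() */

    *nb_csectors = ((l2_entry >> csize_shift) & csize_mask) + 1;
    *host_offset = (l2_entry & cluster_offset_mask) & ~511;
}
```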
45 | 45 |
|
46 | 46 |
//#define DEBUG_ALLOC |
47 | 47 |
//#define DEBUG_ALLOC2 |
48 | 48 |
|
49 | 49 |
#define QCOW_MAGIC (('Q' << 24) | ('F' << 16) | ('I' << 8) | 0xfb) |
50 | 50 |
#define QCOW_VERSION 2 |
51 | 51 |
|
... | ... | |
152 | 152 |
} BDRVQcowState; |
153 | 153 |
|
154 | 154 |
static int decompress_cluster(BDRVQcowState *s, uint64_t cluster_offset); |
155 | 155 |
static int qcow_read(BlockDriverState *bs, int64_t sector_num, |
156 | 156 |
uint8_t *buf, int nb_sectors); |
157 | 157 |
static int qcow_read_snapshots(BlockDriverState *bs); |
158 | 158 |
static void qcow_free_snapshots(BlockDriverState *bs); |
159 | 159 |
static int refcount_init(BlockDriverState *bs); |
160 | 160 |
static void refcount_close(BlockDriverState *bs); |
161 | 161 |
static int get_refcount(BlockDriverState *bs, int64_t cluster_index); |
162 | 162 |
static int update_cluster_refcount(BlockDriverState *bs, |
163 | 163 |
int64_t cluster_index, |
164 | 164 |
int addend); |
165 | 165 |
static void update_refcount(BlockDriverState *bs, |
166 | 166 |
int64_t offset, int64_t length, |
167 | 167 |
int addend); |
168 | 168 |
static int64_t alloc_clusters(BlockDriverState *bs, int64_t size); |
169 | 169 |
static int64_t alloc_bytes(BlockDriverState *bs, int size); |
170 | 170 |
static void free_clusters(BlockDriverState *bs, |
171 | 171 |
int64_t offset, int64_t size); |
172 | 172 |
#ifdef DEBUG_ALLOC |
173 | 173 |
static void check_refcounts(BlockDriverState *bs); |
... | ... | |
176 | 176 |
static int qcow_probe(const uint8_t *buf, int buf_size, const char *filename) |
177 | 177 |
{ |
178 | 178 |
const QCowHeader *cow_header = (const void *)buf; |
179 | 179 |
|
180 | 180 |
if (buf_size >= sizeof(QCowHeader) && |
181 | 181 |
be32_to_cpu(cow_header->magic) == QCOW_MAGIC && |
182 | 182 |
be32_to_cpu(cow_header->version) == QCOW_VERSION) |
183 | 183 |
return 100; |
184 | 184 |
else |
185 | 185 |
return 0; |
... | ... | |
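As a side note on the probe above: the magic constant spells the big-endian byte sequence "QFI" followed by 0xfb. A quick check (illustrative only):

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* 'Q' = 0x51, 'F' = 0x46, 'I' = 0x49 */
    uint32_t magic = ('Q' << 24) | ('F' << 16) | ('I' << 8) | 0xfb;
    assert(magic == 0x514649fb);
    return 0;
}
```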
209 | 209 |
be32_to_cpus(&header.refcount_table_clusters); |
210 | 210 |
be64_to_cpus(&header.snapshots_offset); |
211 | 211 |
be32_to_cpus(&header.nb_snapshots); |
212 | 212 |
|
213 | 213 |
if (header.magic != QCOW_MAGIC || header.version != QCOW_VERSION) |
214 | 214 |
goto fail; |
215 | 215 |
if (header.size <= 1 || |
216 | 216 |
header.cluster_bits < 9 || |
217 | 217 |
header.cluster_bits > 16) |
218 | 218 |
goto fail; |
219 | 219 |
if (header.crypt_method > QCOW_CRYPT_AES) |
... | ... | |
231 | 231 |
s->csize_mask = (1 << (s->cluster_bits - 8)) - 1; |
232 | 232 |
s->cluster_offset_mask = (1LL << s->csize_shift) - 1; |
233 | 233 |
s->refcount_table_offset = header.refcount_table_offset; |
234 | 234 |
s->refcount_table_size = |
235 | 235 |
header.refcount_table_clusters << (s->cluster_bits - 3); |
236 | 236 |
|
237 | 237 |
s->snapshots_offset = header.snapshots_offset; |
... | ... | |
249 | 249 |
s->l1_table = qemu_malloc(s->l1_size * sizeof(uint64_t)); |
250 | 250 |
if (!s->l1_table) |
251 | 251 |
goto fail; |
252 | 252 |
if (bdrv_pread(s->hd, s->l1_table_offset, s->l1_table, s->l1_size * sizeof(uint64_t)) != |
253 | 253 |
s->l1_size * sizeof(uint64_t)) |
254 | 254 |
goto fail; |
255 | 255 |
for(i = 0;i < s->l1_size; i++) { |
... | ... | |
267 | 267 |
if (!s->cluster_data) |
268 | 268 |
goto fail; |
269 | 269 |
s->cluster_cache_offset = -1; |
270 | 270 |
|
271 | 271 |
if (refcount_init(bs) < 0) |
272 | 272 |
goto fail; |
273 | 273 |
|
... | ... | |
304 | 304 |
BDRVQcowState *s = bs->opaque; |
305 | 305 |
uint8_t keybuf[16]; |
306 | 306 |
int len, i; |
307 | 307 |
|
308 | 308 |
memset(keybuf, 0, 16); |
309 | 309 |
len = strlen(key); |
310 | 310 |
if (len > 16) |
... | ... | |
358 | 358 |
for(i = 0; i < nb_sectors; i++) { |
359 | 359 |
ivec.ll[0] = cpu_to_le64(sector_num); |
360 | 360 |
ivec.ll[1] = 0; |
361 | 361 |
AES_cbc_encrypt(in_buf, out_buf, 512, key, |
362 | 362 |
ivec.b, enc); |
363 | 363 |
sector_num++; |
364 | 364 |
in_buf += 512; |
... | ... | |
379 | 379 |
if (ret < 0) |
380 | 380 |
return ret; |
381 | 381 |
if (s->crypt_method) { |
382 | 382 |
encrypt_sectors(s, start_sect + n_start, |
383 | 383 |
s->cluster_data, |
384 | 384 |
s->cluster_data, n, 1, |
385 | 385 |
&s->aes_encrypt_key); |
386 | 386 |
} |
387 | 387 |
ret = bdrv_write(s->hd, (cluster_offset >> 9) + n_start, |
388 | 388 |
s->cluster_data, n); |
389 | 389 |
if (ret < 0) |
390 | 390 |
return ret; |
... | ... | |
451 | 451 |
|
452 | 452 |
/* write new table (align to cluster) */ |
453 | 453 |
new_l1_table_offset = alloc_clusters(bs, new_l1_size2); |
454 | 454 |
|
455 | 455 |
for(i = 0; i < s->l1_size; i++) |
456 | 456 |
new_l1_table[i] = cpu_to_be64(new_l1_table[i]); |
457 | 457 |
ret = bdrv_pwrite(s->hd, new_l1_table_offset, new_l1_table, new_l1_size2); |
... | ... | |
459 | 459 |
goto fail; |
460 | 460 |
for(i = 0; i < s->l1_size; i++) |
461 | 461 |
new_l1_table[i] = be64_to_cpu(new_l1_table[i]); |
462 | 462 |
|
463 | 463 |
/* set new table */ |
464 | 464 |
data64 = cpu_to_be64(new_l1_table_offset); |
465 | 465 |
if (bdrv_pwrite(s->hd, offsetof(QCowHeader, l1_table_offset), |
... | ... | |
489 | 489 |
* |
490 | 490 |
* 2 to allocate a compressed cluster of size |
491 | 491 |
* 'compressed_size'. 'compressed_size' must be > 0 and < |
492 | 492 |
* cluster_size |
493 | 493 |
* |
494 | 494 |
* return 0 if not allocated. |
495 | 495 |
*/ |
... | ... | |
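The comment above documents get_cluster_offset()'s three allocate modes. For orientation, the call sites that appear later in this same diff use them like this (sector_num, index_in_cluster, n and out_len are the callers' locals; collected here purely as an illustration):

```c
/* read path (qcow_read, qcow_aio_read_cb): never allocate */
cluster_offset = get_cluster_offset(bs, sector_num << 9, 0, 0, 0, 0);

/* write path (qcow_write, qcow_aio_write_cb): allocate a normal cluster and
 * copy/encrypt the sectors outside [index_in_cluster, index_in_cluster + n) */
cluster_offset = get_cluster_offset(bs, sector_num << 9, 1, 0,
                                    index_in_cluster,
                                    index_in_cluster + n);

/* compressed path (qcow_write_compressed): allocate out_len compressed bytes */
cluster_offset = get_cluster_offset(bs, sector_num << 9, 2, out_len, 0, 0);
```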
501 | 501 |
BDRVQcowState *s = bs->opaque; |
502 | 502 |
int min_index, i, j, l1_index, l2_index, ret; |
503 | 503 |
uint64_t l2_offset, *l2_table, cluster_offset, tmp, old_l2_offset; |
504 | 504 |
|
505 | 505 |
l1_index = offset >> (s->l2_bits + s->cluster_bits); |
506 | 506 |
if (l1_index >= s->l1_size) { |
507 | 507 |
/* outside l1 table is allowed: we grow the table if needed */ |
... | ... | |
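The hunk above computes the L1 index by shifting the guest offset past the L2 and cluster bits. A minimal sketch of the complete two-level split; the l2_index and intra-cluster expressions are not shown in this hunk and are assumptions consistent with the rest of the driver:

```c
#include <stdint.h>

/* Guest offset -> L1 index, L2 index, byte offset within the data cluster. */
static void split_guest_offset(uint64_t offset, int cluster_bits, int l2_bits,
                               int *l1_index, int *l2_index, int *in_cluster)
{
    *l1_index   = offset >> (l2_bits + cluster_bits);               /* as above */
    *l2_index   = (offset >> cluster_bits) & ((1 << l2_bits) - 1);  /* assumed */
    *in_cluster = offset & ((1 << cluster_bits) - 1);               /* assumed */
}
```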
521 | 521 |
/* update the L1 entry */ |
522 | 522 |
s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED; |
523 | 523 |
tmp = cpu_to_be64(l2_offset | QCOW_OFLAG_COPIED); |
524 | 524 |
if (bdrv_pwrite(s->hd, s->l1_table_offset + l1_index * sizeof(tmp), |
525 | 525 |
&tmp, sizeof(tmp)) != sizeof(tmp)) |
526 | 526 |
return 0; |
527 | 527 |
min_index = l2_cache_new_entry(bs); |
... | ... | |
530 | 530 |
if (old_l2_offset == 0) { |
531 | 531 |
memset(l2_table, 0, s->l2_size * sizeof(uint64_t)); |
532 | 532 |
} else { |
533 | 533 |
if (bdrv_pread(s->hd, old_l2_offset, |
534 | 534 |
l2_table, s->l2_size * sizeof(uint64_t)) != |
535 | 535 |
s->l2_size * sizeof(uint64_t)) |
536 | 536 |
return 0; |
537 | 537 |
} |
538 | 538 |
if (bdrv_pwrite(s->hd, l2_offset, |
539 | 539 |
l2_table, s->l2_size * sizeof(uint64_t)) != |
540 | 540 |
s->l2_size * sizeof(uint64_t)) |
541 | 541 |
return 0; |
... | ... | |
563 | 563 |
/* not found: load a new entry in the least used one */ |
564 | 564 |
min_index = l2_cache_new_entry(bs); |
565 | 565 |
l2_table = s->l2_cache + (min_index << s->l2_bits); |
566 | 566 |
if (bdrv_pread(s->hd, l2_offset, l2_table, s->l2_size * sizeof(uint64_t)) != |
567 | 567 |
s->l2_size * sizeof(uint64_t)) |
568 | 568 |
return 0; |
569 | 569 |
} |
... | ... | |
581 | 581 |
/* free the cluster */ |
582 | 582 |
if (cluster_offset & QCOW_OFLAG_COMPRESSED) { |
583 | 583 |
int nb_csectors; |
584 | 584 |
nb_csectors = ((cluster_offset >> s->csize_shift) & |
585 | 585 |
s->csize_mask) + 1; |
586 | 586 |
free_clusters(bs, (cluster_offset & s->cluster_offset_mask) & ~511, |
587 | 587 |
nb_csectors * 512); |
... | ... | |
600 | 600 |
written */ |
601 | 601 |
if ((n_end - n_start) < s->cluster_sectors) { |
602 | 602 |
uint64_t start_sect; |
603 | 603 |
|
604 | 604 |
start_sect = (offset & ~(s->cluster_size - 1)) >> 9; |
605 | 605 |
ret = copy_sectors(bs, start_sect, |
606 | 606 |
cluster_offset, 0, n_start); |
... | ... | |
615 | 615 |
} else { |
616 | 616 |
int nb_csectors; |
617 | 617 |
cluster_offset = alloc_bytes(bs, compressed_size); |
618 | 618 |
nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) - |
619 | 619 |
(cluster_offset >> 9); |
620 | 620 |
cluster_offset |= QCOW_OFLAG_COMPRESSED | |
621 | 621 |
((uint64_t)nb_csectors << s->csize_shift); |
622 | 622 |
/* compressed clusters never have the copied flag */ |
623 | 623 |
tmp = cpu_to_be64(cluster_offset); |
624 | 624 |
} |
625 | 625 |
/* update L2 table */ |
626 | 626 |
l2_table[l2_index] = tmp; |
627 | 627 |
if (bdrv_pwrite(s->hd, |
628 | 628 |
l2_offset + l2_index * sizeof(tmp), &tmp, sizeof(tmp)) != sizeof(tmp)) |
629 | 629 |
return 0; |
630 | 630 |
return cluster_offset; |
631 | 631 |
} |
632 | 632 |
|
633 | 633 |
static int qcow_is_allocated(BlockDriverState *bs, int64_t sector_num, |
634 | 634 |
int nb_sectors, int *pnum) |
635 | 635 |
{ |
636 | 636 |
BDRVQcowState *s = bs->opaque; |
... | ... | |
672 | 672 |
inflateEnd(strm); |
673 | 673 |
return 0; |
674 | 674 |
} |
675 | 675 |
|
676 | 676 |
static int decompress_cluster(BDRVQcowState *s, uint64_t cluster_offset) |
677 | 677 |
{ |
678 | 678 |
int ret, csize, nb_csectors, sector_offset; |
... | ... | |
697 | 697 |
} |
698 | 698 |
|
699 | 699 |
/* handle reading after the end of the backing file */ |
700 | 700 |
static int backing_read1(BlockDriverState *bs, |
701 | 701 |
int64_t sector_num, uint8_t *buf, int nb_sectors) |
702 | 702 |
{ |
703 | 703 |
int n1; |
... | ... | |
711 | 711 |
return n1; |
712 | 712 |
} |
713 | 713 |
|
714 | 714 |
static int qcow_read(BlockDriverState *bs, int64_t sector_num, |
715 | 715 |
uint8_t *buf, int nb_sectors) |
716 | 716 |
{ |
717 | 717 |
BDRVQcowState *s = bs->opaque; |
718 | 718 |
int ret, index_in_cluster, n, n1; |
719 | 719 |
uint64_t cluster_offset; |
720 | 720 |
|
721 | 721 |
while (nb_sectors > 0) { |
722 | 722 |
cluster_offset = get_cluster_offset(bs, sector_num << 9, 0, 0, 0, 0); |
723 | 723 |
index_in_cluster = sector_num & (s->cluster_sectors - 1); |
... | ... | |
742 | 742 |
memcpy(buf, s->cluster_cache + index_in_cluster * 512, 512 * n); |
743 | 743 |
} else { |
744 | 744 |
ret = bdrv_pread(s->hd, cluster_offset + index_in_cluster * 512, buf, n * 512); |
745 | 745 |
if (ret != n * 512) |
746 | 746 |
return -1; |
747 | 747 |
if (s->crypt_method) { |
748 | 748 |
encrypt_sectors(s, sector_num, buf, buf, n, 0, |
749 | 749 |
&s->aes_decrypt_key); |
750 | 750 |
} |
751 | 751 |
} |
... | ... | |
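The loop above advances one cluster at a time: index_in_cluster = sector_num & (cluster_sectors - 1), and n is capped at both the cluster end and the remaining request. A worked example as C comments, assuming 4 KiB clusters (cluster_bits = 12, so cluster_sectors = 8):

```c
/* Reading 10 sectors starting at sector 5, assuming cluster_sectors == 8:
 *   pass 1: index_in_cluster = 5 & 7 = 5, n = min(8 - 5, 10) = 3  -> sectors 5..7
 *   pass 2: index_in_cluster = 8 & 7 = 0, n = min(8 - 0, 7)  = 7  -> sectors 8..14
 * Each pass resolves one cluster via get_cluster_offset() and then reads from
 * the backing file, the compressed-cluster cache, or the image, as above. */
```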
756 | 756 |
return 0; |
757 | 757 |
} |
758 | 758 |
|
759 | 759 |
static int qcow_write(BlockDriverState *bs, int64_t sector_num, |
760 | 760 |
const uint8_t *buf, int nb_sectors) |
761 | 761 |
{ |
762 | 762 |
BDRVQcowState *s = bs->opaque; |
763 | 763 |
int ret, index_in_cluster, n; |
764 | 764 |
uint64_t cluster_offset; |
765 | 765 |
|
766 | 766 |
while (nb_sectors > 0) { |
767 | 767 |
index_in_cluster = sector_num & (s->cluster_sectors - 1); |
768 | 768 |
n = s->cluster_sectors - index_in_cluster; |
769 | 769 |
if (n > nb_sectors) |
770 | 770 |
n = nb_sectors; |
771 | 771 |
cluster_offset = get_cluster_offset(bs, sector_num << 9, 1, 0, |
772 | 772 |
index_in_cluster, |
773 | 773 |
index_in_cluster + n); |
774 | 774 |
if (!cluster_offset) |
775 | 775 |
return -1; |
776 | 776 |
if (s->crypt_method) { |
777 | 777 |
encrypt_sectors(s, sector_num, s->cluster_data, buf, n, 1, |
778 | 778 |
&s->aes_encrypt_key); |
779 | 779 |
ret = bdrv_pwrite(s->hd, cluster_offset + index_in_cluster * 512, |
780 | 780 |
s->cluster_data, n * 512); |
781 | 781 |
} else { |
782 | 782 |
ret = bdrv_pwrite(s->hd, cluster_offset + index_in_cluster * 512, buf, n * 512); |
783 | 783 |
} |
784 | 784 |
if (ret != n * 512) |
785 | 785 |
return -1; |
786 | 786 |
nb_sectors -= n; |
787 | 787 |
sector_num += n; |
... | ... | |
798 | 798 |
int nb_sectors; |
799 | 799 |
int n; |
800 | 800 |
uint64_t cluster_offset; |
801 | 801 |
uint8_t *cluster_data; |
802 | 802 |
BlockDriverAIOCB *hd_aiocb; |
803 | 803 |
} QCowAIOCB; |
804 | 804 |
|
... | ... | |
825 | 825 |
/* nothing to do */ |
826 | 826 |
} else { |
827 | 827 |
if (s->crypt_method) { |
828 | 828 |
encrypt_sectors(s, acb->sector_num, acb->buf, acb->buf, |
829 | 829 |
acb->n, 0, |
830 | 830 |
&s->aes_decrypt_key); |
831 | 831 |
} |
832 | 832 |
} |
... | ... | |
841 | 841 |
qemu_aio_release(acb); |
842 | 842 |
return; |
843 | 843 |
} |
844 | 844 |
|
845 | 845 |
/* prepare next AIO request */ |
846 | 846 |
acb->cluster_offset = get_cluster_offset(bs, acb->sector_num << 9, |
847 | 847 |
0, 0, 0, 0); |
848 | 848 |
index_in_cluster = acb->sector_num & (s->cluster_sectors - 1); |
849 | 849 |
acb->n = s->cluster_sectors - index_in_cluster; |
... | ... | |
853 | 853 |
if (!acb->cluster_offset) { |
854 | 854 |
if (bs->backing_hd) { |
855 | 855 |
/* read from the base image */ |
856 | 856 |
n1 = backing_read1(bs->backing_hd, acb->sector_num, |
857 | 857 |
acb->buf, acb->n); |
858 | 858 |
if (n1 > 0) { |
859 | 859 |
acb->hd_aiocb = bdrv_aio_read(bs->backing_hd, acb->sector_num, |
860 | 860 |
acb->buf, acb->n, qcow_aio_read_cb, acb); |
861 | 861 |
if (acb->hd_aiocb == NULL) |
862 | 862 |
goto fail; |
... | ... | |
872 | 872 |
/* add AIO support for compressed blocks ? */ |
873 | 873 |
if (decompress_cluster(s, acb->cluster_offset) < 0) |
874 | 874 |
goto fail; |
875 | 875 |
memcpy(acb->buf, |
876 | 876 |
s->cluster_cache + index_in_cluster * 512, 512 * acb->n); |
877 | 877 |
goto redo; |
878 | 878 |
} else { |
... | ... | |
881 | 881 |
goto fail; |
882 | 882 |
} |
883 | 883 |
acb->hd_aiocb = bdrv_aio_read(s->hd, |
884 | 884 |
(acb->cluster_offset >> 9) + index_in_cluster, |
885 | 885 |
acb->buf, acb->n, qcow_aio_read_cb, acb); |
886 | 886 |
if (acb->hd_aiocb == NULL) |
887 | 887 |
goto fail; |
... | ... | |
948 | 948 |
qemu_aio_release(acb); |
949 | 949 |
return; |
950 | 950 |
} |
951 | 951 |
|
952 | 952 |
index_in_cluster = acb->sector_num & (s->cluster_sectors - 1); |
953 | 953 |
acb->n = s->cluster_sectors - index_in_cluster; |
954 | 954 |
if (acb->n > acb->nb_sectors) |
955 | 955 |
acb->n = acb->nb_sectors; |
956 | 956 |
cluster_offset = get_cluster_offset(bs, acb->sector_num << 9, 1, 0, |
957 | 957 |
index_in_cluster, |
958 | 958 |
index_in_cluster + acb->n); |
959 | 959 |
if (!cluster_offset || (cluster_offset & 511) != 0) { |
960 | 960 |
ret = -EIO; |
... | ... | |
968 | 968 |
goto fail; |
969 | 969 |
} |
970 | 970 |
} |
971 | 971 |
encrypt_sectors(s, acb->sector_num, acb->cluster_data, acb->buf, |
972 | 972 |
acb->n, 1, &s->aes_encrypt_key); |
973 | 973 |
src_buf = acb->cluster_data; |
974 | 974 |
} else { |
975 | 975 |
src_buf = acb->buf; |
976 | 976 |
} |
977 | 977 |
acb->hd_aiocb = bdrv_aio_write(s->hd, |
978 | 978 |
(cluster_offset >> 9) + index_in_cluster, |
979 | 979 |
src_buf, acb->n, |
980 | 980 |
qcow_aio_write_cb, acb); |
981 | 981 |
if (acb->hd_aiocb == NULL) |
982 | 982 |
goto fail; |
... | ... | |
988 | 988 |
{ |
989 | 989 |
BDRVQcowState *s = bs->opaque; |
990 | 990 |
QCowAIOCB *acb; |
991 | 991 |
|
992 | 992 |
s->cluster_cache_offset = -1; /* disable compressed cache */ |
993 | 993 |
|
994 | 994 |
acb = qcow_aio_setup(bs, sector_num, (uint8_t*)buf, nb_sectors, cb, opaque); |
995 | 995 |
if (!acb) |
996 | 996 |
return NULL; |
997 | 997 |
|
998 | 998 |
qcow_aio_write_cb(acb, 0); |
999 | 999 |
return &acb->common; |
1000 | 1000 |
} |
... | ... | |
1038 | 1038 |
|
1039 | 1039 |
start = offset & ~(s->cluster_size - 1); |
1040 | 1040 |
last = (offset + size - 1) & ~(s->cluster_size - 1); |
1041 | 1041 |
for(cluster_offset = start; cluster_offset <= last; |
1042 | 1042 |
cluster_offset += s->cluster_size) { |
1043 | 1043 |
p = &s->refcount_block[cluster_offset >> s->cluster_bits]; |
1044 | 1044 |
refcount = be16_to_cpu(*p); |
... | ... | |
1054 | 1054 |
QCowHeader header; |
1055 | 1055 |
uint64_t tmp, offset; |
1056 | 1056 |
QCowCreateState s1, *s = &s1; |
1057 | 1057 |
|
1058 | 1058 |
memset(s, 0, sizeof(*s)); |
1059 | 1059 |
|
1060 | 1060 |
fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, 0644); |
... | ... | |
1096 | 1096 |
s->refcount_block = qemu_mallocz(s->cluster_size); |
1097 | 1097 |
if (!s->refcount_block) |
1098 | 1098 |
goto fail; |
1099 | 1099 |
|
1100 | 1100 |
s->refcount_table_offset = offset; |
1101 | 1101 |
header.refcount_table_offset = cpu_to_be64(offset); |
1102 | 1102 |
header.refcount_table_clusters = cpu_to_be32(1); |
... | ... | |
1111 | 1111 |
create_refcount_update(s, s->l1_table_offset, l1_size * sizeof(uint64_t)); |
1112 | 1112 |
create_refcount_update(s, s->refcount_table_offset, s->cluster_size); |
1113 | 1113 |
create_refcount_update(s, s->refcount_block_offset, s->cluster_size); |
1114 | 1114 |
|
1115 | 1115 |
/* write all the data */ |
1116 | 1116 |
write(fd, &header, sizeof(header)); |
1117 | 1117 |
if (backing_file) { |
... | ... | |
1124 | 1124 |
} |
1125 | 1125 |
lseek(fd, s->refcount_table_offset, SEEK_SET); |
1126 | 1126 |
write(fd, s->refcount_table, s->cluster_size); |
1127 | 1127 |
|
1128 | 1128 |
lseek(fd, s->refcount_block_offset, SEEK_SET); |
1129 | 1129 |
write(fd, s->refcount_block, s->cluster_size); |
1130 | 1130 |
|
... | ... | |
1153 | 1153 |
ret = bdrv_truncate(s->hd, s->l1_table_offset + l1_length); |
1154 | 1154 |
if (ret < 0) |
1155 | 1155 |
return ret; |
1156 | 1156 |
|
1157 | 1157 |
l2_cache_reset(bs); |
1158 | 1158 |
#endif |
1159 | 1159 |
return 0; |
... | ... | |
1161 | 1161 |
|
1162 | 1162 |
/* XXX: put compressed sectors first, then all the cluster aligned |
1163 | 1163 |
tables to avoid losing bytes in alignment */ |
1164 | 1164 |
static int qcow_write_compressed(BlockDriverState *bs, int64_t sector_num, |
1165 | 1165 |
const uint8_t *buf, int nb_sectors) |
1166 | 1166 |
{ |
1167 | 1167 |
BDRVQcowState *s = bs->opaque; |
... | ... | |
1189 | 1189 |
/* best compression, small window, no zlib header */ |
1190 | 1190 |
memset(&strm, 0, sizeof(strm)); |
1191 | 1191 |
ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION, |
1192 | 1192 |
Z_DEFLATED, -12, |
1193 | 1193 |
9, Z_DEFAULT_STRATEGY); |
1194 | 1194 |
if (ret != 0) { |
1195 | 1195 |
qemu_free(out_buf); |
... | ... | |
1215 | 1215 |
/* could not compress: write normal cluster */ |
1216 | 1216 |
qcow_write(bs, sector_num, buf, s->cluster_sectors); |
1217 | 1217 |
} else { |
1218 | 1218 |
cluster_offset = get_cluster_offset(bs, sector_num << 9, 2, |
1219 | 1219 |
out_len, 0, 0); |
1220 | 1220 |
cluster_offset &= s->cluster_offset_mask; |
1221 | 1221 |
if (bdrv_pwrite(s->hd, cluster_offset, out_buf, out_len) != out_len) { |
... | ... | |
1223 | 1223 |
return -1; |
1224 | 1224 |
} |
1225 | 1225 |
} |
1226 | 1226 |
|
1227 | 1227 |
qemu_free(out_buf); |
1228 | 1228 |
return 0; |
1229 | 1229 |
} |
... | ... | |
1238 | 1238 |
{ |
1239 | 1239 |
BDRVQcowState *s = bs->opaque; |
1240 | 1240 |
bdi->cluster_size = s->cluster_size; |
1241 | 1241 |
bdi->vm_state_offset = (int64_t)s->l1_vm_state_index << |
1242 | 1242 |
(s->cluster_bits + s->l2_bits); |
1243 | 1243 |
return 0; |
1244 | 1244 |
} |
... | ... | |
1247 | 1247 |
/* snapshot support */ |
1248 | 1248 |
|
1249 | 1249 |
/* update the refcounts of snapshots and the copied flag */ |
1250 | 1250 |
static int update_snapshot_refcount(BlockDriverState *bs, |
1251 | 1251 |
int64_t l1_table_offset, |
1252 | 1252 |
int l1_size, |
1253 | 1253 |
int addend) |
... | ... | |
1256 | 1256 |
uint64_t *l1_table, *l2_table, l2_offset, offset, l1_size2, l1_allocated; |
1257 | 1257 |
int64_t old_offset, old_l2_offset; |
1258 | 1258 |
int l2_size, i, j, l1_modified, l2_modified, nb_csectors, refcount; |
1259 | 1259 |
|
1260 | 1260 |
l2_cache_reset(bs); |
1261 | 1261 |
|
1262 | 1262 |
l2_table = NULL; |
... | ... | |
1268 | 1268 |
if (!l1_table) |
1269 | 1269 |
goto fail; |
1270 | 1270 |
l1_allocated = 1; |
1271 | 1271 |
if (bdrv_pread(s->hd, l1_table_offset, |
1272 | 1272 |
l1_table, l1_size2) != l1_size2) |
1273 | 1273 |
goto fail; |
1274 | 1274 |
for(i = 0;i < l1_size; i++) |
... | ... | |
1278 | 1278 |
l1_table = s->l1_table; |
1279 | 1279 |
l1_allocated = 0; |
1280 | 1280 |
} |
1281 | 1281 |
|
1282 | 1282 |
l2_size = s->l2_size * sizeof(uint64_t); |
1283 | 1283 |
l2_table = qemu_malloc(l2_size); |
1284 | 1284 |
if (!l2_table) |
... | ... | |
1298 | 1298 |
old_offset = offset; |
1299 | 1299 |
offset &= ~QCOW_OFLAG_COPIED; |
1300 | 1300 |
if (offset & QCOW_OFLAG_COMPRESSED) { |
1301 | 1301 |
nb_csectors = ((offset >> s->csize_shift) & |
1302 | 1302 |
s->csize_mask) + 1; |
1303 | 1303 |
if (addend != 0) |
1304 | 1304 |
update_refcount(bs, (offset & s->cluster_offset_mask) & ~511, |
1305 | 1305 |
nb_csectors * 512, addend); |
1306 | 1306 |
/* compressed clusters are never modified */ |
1307 | 1307 |
refcount = 2; |
1308 | 1308 |
} else { |
1309 | 1309 |
if (addend != 0) { |
1310 | 1310 |
refcount = update_cluster_refcount(bs, offset >> s->cluster_bits, addend); |
... | ... | |
1323 | 1323 |
} |
1324 | 1324 |
} |
1325 | 1325 |
if (l2_modified) { |
1326 | 1326 |
if (bdrv_pwrite(s->hd, |
1327 | 1327 |
l2_offset, l2_table, l2_size) != l2_size) |
1328 | 1328 |
goto fail; |
1329 | 1329 |
} |
... | ... | |
1345 | 1345 |
if (l1_modified) { |
1346 | 1346 |
for(i = 0; i < l1_size; i++) |
1347 | 1347 |
cpu_to_be64s(&l1_table[i]); |
1348 | 1348 |
if (bdrv_pwrite(s->hd, l1_table_offset, l1_table, |
1349 | 1349 |
l1_size2) != l1_size2) |
1350 | 1350 |
goto fail; |
1351 | 1351 |
for(i = 0; i < l1_size; i++) |
... | ... | |
1455 | 1455 |
|
1456 | 1456 |
snapshots_offset = alloc_clusters(bs, snapshots_size); |
1457 | 1457 |
offset = snapshots_offset; |
1458 | 1458 |
|
1459 | 1459 |
for(i = 0; i < s->nb_snapshots; i++) { |
1460 | 1460 |
sn = s->snapshots + i; |
1461 | 1461 |
memset(&h, 0, sizeof(h)); |
... | ... | |
1465 | 1465 |
h.date_sec = cpu_to_be32(sn->date_sec); |
1466 | 1466 |
h.date_nsec = cpu_to_be32(sn->date_nsec); |
1467 | 1467 |
h.vm_clock_nsec = cpu_to_be64(sn->vm_clock_nsec); |
1468 | 1468 |
|
1469 | 1469 |
id_str_size = strlen(sn->id_str); |
1470 | 1470 |
name_size = strlen(sn->name); |
1471 | 1471 |
h.id_str_size = cpu_to_be16(id_str_size); |
... | ... | |
1533 | 1533 |
{ |
1534 | 1534 |
BDRVQcowState *s = bs->opaque; |
1535 | 1535 |
int i, ret; |
1536 | 1536 |
|
1537 | 1537 |
ret = find_snapshot_by_id(bs, name); |
1538 | 1538 |
if (ret >= 0) |
1539 | 1539 |
return ret; |
... | ... | |
1545 | 1545 |
} |
1546 | 1546 |
|
1547 | 1547 |
/* if no id is provided, a new one is constructed */ |
1548 | 1548 |
static int qcow_snapshot_create(BlockDriverState *bs, |
1549 | 1549 |
QEMUSnapshotInfo *sn_info) |
1550 | 1550 |
{ |
1551 | 1551 |
BDRVQcowState *s = bs->opaque; |
1552 | 1552 |
QCowSnapshot *snapshots1, sn1, *sn = &sn1; |
1553 | 1553 |
int i, ret; |
1554 | 1554 |
uint64_t *l1_table = NULL; |
1555 | 1555 |
|
1556 | 1556 |
memset(sn, 0, sizeof(*sn)); |
1557 | 1557 |
|
1558 | 1558 |
if (sn_info->id_str[0] == '\0') { |
... | ... | |
1590 | 1590 |
l1_table[i] = cpu_to_be64(s->l1_table[i]); |
1591 | 1591 |
} |
1592 | 1592 |
if (bdrv_pwrite(s->hd, sn->l1_table_offset, |
1593 | 1593 |
l1_table, s->l1_size * sizeof(uint64_t)) != |
1594 | 1594 |
(s->l1_size * sizeof(uint64_t))) |
1595 | 1595 |
goto fail; |
1596 | 1596 |
qemu_free(l1_table); |
... | ... | |
1616 | 1616 |
} |
1617 | 1617 |
|
1618 | 1618 |
/* copy the snapshot 'snapshot_name' into the current disk image */ |
1619 | 1619 |
static int qcow_snapshot_goto(BlockDriverState *bs, |
1620 | 1620 |
const char *snapshot_id) |
1621 | 1621 |
{ |
1622 | 1622 |
BDRVQcowState *s = bs->opaque; |
... | ... | |
1637 | 1637 |
s->l1_size = sn->l1_size; |
1638 | 1638 |
l1_size2 = s->l1_size * sizeof(uint64_t); |
1639 | 1639 |
/* copy the snapshot l1 table to the current l1 table */ |
1640 | 1640 |
if (bdrv_pread(s->hd, sn->l1_table_offset, |
1641 | 1641 |
s->l1_table, l1_size2) != l1_size2) |
1642 | 1642 |
goto fail; |
1643 | 1643 |
if (bdrv_pwrite(s->hd, s->l1_table_offset, |
... | ... | |
1663 | 1663 |
BDRVQcowState *s = bs->opaque; |
1664 | 1664 |
QCowSnapshot *sn; |
1665 | 1665 |
int snapshot_index, ret; |
1666 | 1666 |
|
1667 | 1667 |
snapshot_index = find_snapshot_by_id_or_name(bs, snapshot_id); |
1668 | 1668 |
if (snapshot_index < 0) |
1669 | 1669 |
return -ENOENT; |
... | ... | |
1693 | 1693 |
return 0; |
1694 | 1694 |
} |
1695 | 1695 |
|
1696 | 1696 |
static int qcow_snapshot_list(BlockDriverState *bs, |
1697 | 1697 |
QEMUSnapshotInfo **psn_tab) |
1698 | 1698 |
{ |
1699 | 1699 |
BDRVQcowState *s = bs->opaque; |
... | ... | |
1731 | 1731 |
{ |
1732 | 1732 |
BDRVQcowState *s = bs->opaque; |
1733 | 1733 |
int ret, refcount_table_size2, i; |
1734 | 1734 |
|
1735 | 1735 |
s->refcount_block_cache = qemu_malloc(s->cluster_size); |
1736 | 1736 |
if (!s->refcount_block_cache) |
1737 | 1737 |
goto fail; |
... | ... | |
1760 | 1760 |
} |
1761 | 1761 |
|
1762 | 1762 |
|
1763 | 1763 |
static int load_refcount_block(BlockDriverState *bs, |
1764 | 1764 |
int64_t refcount_block_offset) |
1765 | 1765 |
{ |
1766 | 1766 |
BDRVQcowState *s = bs->opaque; |
1767 | 1767 |
int ret; |
1768 | 1768 |
ret = bdrv_pread(s->hd, refcount_block_offset, s->refcount_block_cache, |
1769 | 1769 |
s->cluster_size); |
1770 | 1770 |
if (ret != s->cluster_size) |
1771 | 1771 |
return -EIO; |
... | ... | |
1790 | 1790 |
if (load_refcount_block(bs, refcount_block_offset) < 0) |
1791 | 1791 |
return 1; |
1792 | 1792 |
} |
1793 | 1793 |
block_index = cluster_index & |
1794 | 1794 |
((1 << (s->cluster_bits - REFCOUNT_SHIFT)) - 1); |
1795 | 1795 |
return be16_to_cpu(s->refcount_block_cache[block_index]); |
1796 | 1796 |
} |
... | ... | |
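get_refcount() above keeps one refcount block (one cluster of 16-bit counters) cached and splits the cluster index into a refcount-table index plus an index inside that block. A minimal sketch; REFCOUNT_SHIFT = 1 and the table-index expression are assumptions consistent with the uint16_t cache used in this file:

```c
#include <stdint.h>

#define REFCOUNT_SHIFT 1   /* assumed: one refcount entry is 2 bytes */

/* A refcount block is one cluster, i.e. cluster_size >> REFCOUNT_SHIFT
 * entries, so a cluster index splits into (table index, block index). */
static void split_refcount_index(int64_t cluster_index, int cluster_bits,
                                 int64_t *table_index, int *block_index)
{
    *table_index = cluster_index >> (cluster_bits - REFCOUNT_SHIFT);   /* assumed */
    *block_index = cluster_index &
                   ((1 << (cluster_bits - REFCOUNT_SHIFT)) - 1);       /* as above */
}
```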
1812 | 1812 |
} |
1813 | 1813 |
#ifdef DEBUG_ALLOC2 |
1814 | 1814 |
printf("alloc_clusters: size=%lld -> %lld\n", |
1815 | 1815 |
size, |
1816 | 1816 |
(s->free_cluster_index - nb_clusters) << s->cluster_bits); |
1817 | 1817 |
#endif |
1818 | 1818 |
return (s->free_cluster_index - nb_clusters) << s->cluster_bits; |
... | ... | |
1839 | 1839 |
BDRVQcowState *s = bs->opaque; |
1840 | 1840 |
int64_t offset, cluster_offset; |
1841 | 1841 |
int free_in_cluster; |
1842 | 1842 |
|
1843 | 1843 |
assert(size > 0 && size <= s->cluster_size); |
1844 | 1844 |
if (s->free_byte_offset == 0) { |
1845 | 1845 |
s->free_byte_offset = alloc_clusters(bs, s->cluster_size); |
1846 | 1846 |
} |
1847 | 1847 |
redo: |
1848 | 1848 |
free_in_cluster = s->cluster_size - |
1849 | 1849 |
(s->free_byte_offset & (s->cluster_size - 1)); |
1850 | 1850 |
if (size <= free_in_cluster) { |
1851 | 1851 |
/* enough space in current cluster */ |
... | ... | |
1872 | 1872 |
return offset; |
1873 | 1873 |
} |
1874 | 1874 |
|
1875 | 1875 |
static void free_clusters(BlockDriverState *bs, |
1876 | 1876 |
int64_t offset, int64_t size) |
1877 | 1877 |
{ |
1878 | 1878 |
update_refcount(bs, offset, size, -1); |
... | ... | |
1912 | 1912 |
new_table = qemu_mallocz(new_table_size2); |
1913 | 1913 |
if (!new_table) |
1914 | 1914 |
return -ENOMEM; |
1915 | 1915 |
memcpy(new_table, s->refcount_table, |
1916 | 1916 |
s->refcount_table_size * sizeof(uint64_t)); |
1917 | 1917 |
for(i = 0; i < s->refcount_table_size; i++) |
1918 | 1918 |
cpu_to_be64s(&new_table[i]); |
1919 | 1919 |
/* Note: we cannot update the refcount now to avoid recursion */ |
1920 | 1920 |
table_offset = alloc_clusters_noref(bs, new_table_size2); |
1921 | 1921 |
ret = bdrv_pwrite(s->hd, table_offset, new_table, new_table_size2); |
1922 | 1922 |
if (ret != new_table_size2) |
1923 | 1923 |
goto fail; |
1924 | 1924 |
for(i = 0; i < s->refcount_table_size; i++) |
1925 | 1925 |
be64_to_cpus(&new_table[i]); |
... | ... | |
1950 | 1950 |
|
1951 | 1951 |
/* addend must be 1 or -1 */ |
1952 | 1952 |
/* XXX: cache several refcount block clusters ? */ |
1953 | 1953 |
static int update_cluster_refcount(BlockDriverState *bs, |
1954 | 1954 |
int64_t cluster_index, |
1955 | 1955 |
int addend) |
1956 | 1956 |
{ |
... | ... | |
1980 | 1980 |
return -EINVAL; |
1981 | 1981 |
s->refcount_table[refcount_table_index] = offset; |
1982 | 1982 |
data64 = cpu_to_be64(offset); |
1983 | 1983 |
ret = bdrv_pwrite(s->hd, s->refcount_table_offset + |
1984 | 1984 |
refcount_table_index * sizeof(uint64_t), |
1985 | 1985 |
&data64, sizeof(data64)); |
1986 | 1986 |
if (ret != sizeof(data64)) |
1987 | 1987 |
return -EINVAL; |
... | ... | |
1996 | 1996 |
} |
1997 | 1997 |
} |
1998 | 1998 |
/* we can update the count and save it */ |
1999 | 1999 |
block_index = cluster_index & |
2000 | 2000 |
((1 << (s->cluster_bits - REFCOUNT_SHIFT)) - 1); |
2001 | 2001 |
refcount = be16_to_cpu(s->refcount_block_cache[block_index]); |
2002 | 2002 |
refcount += addend; |
... | ... | |
2006 | 2006 |
s->free_cluster_index = cluster_index; |
2007 | 2007 |
} |
2008 | 2008 |
s->refcount_block_cache[block_index] = cpu_to_be16(refcount); |
2009 | 2009 |
if (bdrv_pwrite(s->hd, |
2010 | 2010 |
refcount_block_offset + (block_index << REFCOUNT_SHIFT), |
2011 | 2011 |
&s->refcount_block_cache[block_index], 2) != 2) |
2012 | 2012 |
return -EIO; |
2013 | 2013 |
return refcount; |
2014 | 2014 |
} |
2015 | 2015 |
|
2016 | 2016 |
static void update_refcount(BlockDriverState *bs, |
2017 | 2017 |
int64_t offset, int64_t length, |
2018 | 2018 |
int addend) |
2019 | 2019 |
{ |
2020 | 2020 |
BDRVQcowState *s = bs->opaque; |
2021 | 2021 |
int64_t start, last, cluster_offset; |
2022 | 2022 |
|
2023 | 2023 |
#ifdef DEBUG_ALLOC2 |
2024 | 2024 |
printf("update_refcount: offset=%lld size=%lld addend=%d\n", |
2025 | 2025 |
offset, length, addend); |
2026 | 2026 |
#endif |
2027 | 2027 |
if (length <= 0) |
2028 | 2028 |
return; |
2029 | 2029 |
start = offset & ~(s->cluster_size - 1); |
2030 | 2030 |
last = (offset + length - 1) & ~(s->cluster_size - 1); |
2031 | 2031 |
for(cluster_offset = start; cluster_offset <= last; |
2032 | 2032 |
cluster_offset += s->cluster_size) { |
2033 | 2033 |
update_cluster_refcount(bs, cluster_offset >> s->cluster_bits, addend); |
2034 | 2034 |
} |
2035 | 2035 |
} |
2036 | 2036 |
|
2037 | 2037 |
#ifdef DEBUG_ALLOC |
2038 | 2038 |
static void inc_refcounts(BlockDriverState *bs, |
2039 | 2039 |
uint16_t *refcount_table, |
2040 | 2040 |
int refcount_table_size, |
2041 | 2041 |
int64_t offset, int64_t size) |
2042 | 2042 |
{ |
2043 | 2043 |
BDRVQcowState *s = bs->opaque; |
2044 | 2044 |
int64_t start, last, cluster_offset; |
2045 | 2045 |
int k; |
2046 | 2046 |
|
2047 | 2047 |
if (size <= 0) |
2048 | 2048 |
return; |
2049 | 2049 |
|
2050 | 2050 |
start = offset & ~(s->cluster_size - 1); |
2051 | 2051 |
last = (offset + size - 1) & ~(s->cluster_size - 1); |
2052 | 2052 |
for(cluster_offset = start; cluster_offset <= last; |
2053 | 2053 |
cluster_offset += s->cluster_size) { |
2054 | 2054 |
k = cluster_offset >> s->cluster_bits; |
2055 | 2055 |
if (k < 0 || k >= refcount_table_size) { |
... | ... | |
2062 | 2062 |
} |
2063 | 2063 |
} |
2064 | 2064 |
|
2065 | 2065 |
static int check_refcounts_l1(BlockDriverState *bs, |
2066 | 2066 |
uint16_t *refcount_table, |
2067 | 2067 |
int refcount_table_size, |
2068 | 2068 |
int64_t l1_table_offset, int l1_size, |
2069 | 2069 |
int check_copied) |
... | ... | |
2081 | 2081 |
l1_table = qemu_malloc(l1_size2); |
2082 | 2082 |
if (!l1_table) |
2083 | 2083 |
goto fail; |
2084 | 2084 |
if (bdrv_pread(s->hd, l1_table_offset, |
2085 | 2085 |
l1_table, l1_size2) != l1_size2) |
2086 | 2086 |
goto fail; |
2087 | 2087 |
for(i = 0;i < l1_size; i++) |
2088 | 2088 |
be64_to_cpus(&l1_table[i]); |
2089 | 2089 |
|
2090 | 2090 |
l2_size = s->l2_size * sizeof(uint64_t); |
2091 | 2091 |
l2_table = qemu_malloc(l2_size); |
2092 | 2092 |
if (!l2_table) |
... | ... | |
2113 | 2113 |
offset >> s->cluster_bits); |
2114 | 2114 |
offset &= ~QCOW_OFLAG_COPIED; |
2115 | 2115 |
} |
2116 | 2116 |
nb_csectors = ((offset >> s->csize_shift) & |
2117 | 2117 |
s->csize_mask) + 1; |
2118 | 2118 |
offset &= s->cluster_offset_mask; |
2119 | 2119 |
inc_refcounts(bs, refcount_table, |
2120 | 2120 |
refcount_table_size, |
2121 | 2121 |
offset & ~511, nb_csectors * 512); |
2122 | 2122 |
} else { |
... | ... | |
2128 | 2128 |
} |
2129 | 2129 |
} |
2130 | 2130 |
offset &= ~QCOW_OFLAG_COPIED; |
2131 | 2131 |
inc_refcounts(bs, refcount_table, |
2132 | 2132 |
refcount_table_size, |
2133 | 2133 |
offset, s->cluster_size); |
2134 | 2134 |
} |
2135 | 2135 |
} |
2136 | 2136 |
} |
2137 | 2137 |
inc_refcounts(bs, refcount_table, |
2138 | 2138 |
refcount_table_size, |
2139 | 2139 |
l2_offset, |
2140 | 2140 |
s->cluster_size); |
... | ... | |
2165 | 2165 |
/* header */ |
2166 | 2166 |
inc_refcounts(bs, refcount_table, nb_clusters, |
2167 | 2167 |
0, s->cluster_size); |
2168 | 2168 |
|
2169 | 2169 |
check_refcounts_l1(bs, refcount_table, nb_clusters, |
2170 | 2170 |
s->l1_table_offset, s->l1_size, 1); |
2171 | 2171 |
|
... | ... | |
2180 | 2180 |
|
2181 | 2181 |
/* refcount data */ |
2182 | 2182 |
inc_refcounts(bs, refcount_table, nb_clusters, |
2183 | 2183 |
s->refcount_table_offset, |
2184 | 2184 |
s->refcount_table_size * sizeof(uint64_t)); |
2185 | 2185 |
for(i = 0; i < s->refcount_table_size; i++) { |
2186 | 2186 |
int64_t offset; |