Revision 5fafdf24: block-qcow.c

This is a whitespace-only cleanup: trailing blanks are stripped throughout block-qcow.c, with no functional change.

--- a/block-qcow.c
+++ b/block-qcow.c
@@ -1,8 +1,8 @@
 /*
  * Block driver for the QCOW format
- * 
+ *
  * Copyright (c) 2004-2006 Fabrice Bellard
- * 
+ *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
  * of this software and associated documentation files (the "Software"), to deal
  * in the Software without restriction, including without limitation the rights
@@ -80,10 +80,10 @@
 static int qcow_probe(const uint8_t *buf, int buf_size, const char *filename)
 {
     const QCowHeader *cow_header = (const void *)buf;
-    
+
     if (buf_size >= sizeof(QCowHeader) &&
         be32_to_cpu(cow_header->magic) == QCOW_MAGIC &&
-        be32_to_cpu(cow_header->version) == QCOW_VERSION) 
+        be32_to_cpu(cow_header->version) == QCOW_VERSION)
         return 100;
     else
         return 0;
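As an aside, the probe's 100/0 scoring hinges on reading the big-endian on-disk magic and version. A standalone sketch of the same check (QCOW_MAGIC and QCOW_VERSION as this driver defines them; be32_to_cpu_demo is a hypothetical stand-in for QEMU's byteswap helper):

#include <stdint.h>
#include <stdio.h>

#define QCOW_MAGIC   (('Q' << 24) | ('F' << 16) | ('I' << 8) | 0xfb)
#define QCOW_VERSION 1

/* Hypothetical stand-in for be32_to_cpu() applied to a byte buffer. */
static uint32_t be32_to_cpu_demo(const uint8_t *p)
{
    return ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
}

int main(void)
{
    /* First 8 bytes of a qcow v1 image: magic "QFI\xfb", then version 1. */
    const uint8_t buf[8] = { 'Q', 'F', 'I', 0xfb, 0, 0, 0, 1 };
    int score = (be32_to_cpu_demo(buf) == QCOW_MAGIC &&
                 be32_to_cpu_demo(buf + 4) == QCOW_VERSION) ? 100 : 0;
    printf("probe score: %d\n", score);   /* prints 100 */
    return 0;
}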
@@ -108,7 +108,7 @@
     be64_to_cpus(&header.size);
     be32_to_cpus(&header.crypt_method);
     be64_to_cpus(&header.l1_table_offset);
-    
+
     if (header.magic != QCOW_MAGIC || header.version != QCOW_VERSION)
         goto fail;
     if (header.size <= 1 || header.cluster_bits < 9)
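For orientation, the fields being byteswapped here belong to the qcow version 1 on-disk header (all integers big-endian on disk). The struct below is reproduced from memory of this driver's definition and is not part of the hunks shown, so treat it as a reference sketch:

#include <stdint.h>

typedef struct QCowHeader {
    uint32_t magic;              /* "QFI\xfb" */
    uint32_t version;            /* 1 for this driver */
    uint64_t backing_file_offset;
    uint32_t backing_file_size;
    uint32_t mtime;
    uint64_t size;               /* guest disk size in bytes */
    uint8_t  cluster_bits;       /* log2 of the cluster size */
    uint8_t  l2_bits;            /* log2 of the entries per L2 table */
    uint32_t crypt_method;       /* QCOW_CRYPT_NONE or QCOW_CRYPT_AES */
    uint64_t l1_table_offset;    /* file offset of the L1 table */
} QCowHeader;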
@@ -134,7 +134,7 @@
     s->l1_table = qemu_malloc(s->l1_size * sizeof(uint64_t));
     if (!s->l1_table)
         goto fail;
-    if (bdrv_pread(s->hd, s->l1_table_offset, s->l1_table, s->l1_size * sizeof(uint64_t)) != 
+    if (bdrv_pread(s->hd, s->l1_table_offset, s->l1_table, s->l1_size * sizeof(uint64_t)) !=
         s->l1_size * sizeof(uint64_t))
         goto fail;
     for(i = 0;i < s->l1_size; i++) {
@@ -151,7 +151,7 @@
     if (!s->cluster_data)
         goto fail;
     s->cluster_cache_offset = -1;
-    
+
     /* read the backing file name */
     if (header.backing_file_offset != 0) {
         len = header.backing_file_size;
@@ -177,7 +177,7 @@
     BDRVQcowState *s = bs->opaque;
     uint8_t keybuf[16];
     int len, i;
-    
+
     memset(keybuf, 0, 16);
     len = strlen(key);
     if (len > 16)
@@ -231,7 +231,7 @@
     for(i = 0; i < nb_sectors; i++) {
         ivec.ll[0] = cpu_to_le64(sector_num);
         ivec.ll[1] = 0;
-        AES_cbc_encrypt(in_buf, out_buf, 512, key, 
+        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                         ivec.b, enc);
         sector_num++;
         in_buf += 512;
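Each 512-byte sector gets its own CBC IV derived from the sector number (little-endian in the low 8 bytes, zeros above), so identical plaintext sectors do not encrypt identically. A minimal sketch of one sector's worth of that scheme, assuming OpenSSL's AES_cbc_encrypt() (QEMU bundles a compatible aes.h):

#include <stdint.h>
#include <string.h>
#include <openssl/aes.h>

/* Illustrative helper, not from the driver: encrypt or decrypt one
 * 512-byte sector with an IV built from its sector number. */
static void crypt_one_sector(const AES_KEY *key, uint64_t sector_num,
                             const uint8_t *in, uint8_t *out, int enc)
{
    uint8_t ivec[16];
    memset(ivec, 0, sizeof(ivec));
    for (int i = 0; i < 8; i++)           /* cpu_to_le64(sector_num) */
        ivec[i] = (sector_num >> (8 * i)) & 0xff;
    AES_cbc_encrypt(in, out, 512, key, ivec, enc);
}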
@@ -248,7 +248,7 @@
  *
  * 2 to allocate a compressed cluster of size
  * 'compressed_size'. 'compressed_size' must be > 0 and <
- * cluster_size 
+ * cluster_size
  *
  * return 0 if not allocated.
  */
@@ -262,7 +262,7 @@
     uint64_t l2_offset, *l2_table, cluster_offset, tmp;
     uint32_t min_count;
     int new_l2_table;
-    
+
     l1_index = offset >> (s->l2_bits + s->cluster_bits);
     l2_offset = s->l1_table[l1_index];
     new_l2_table = 0;
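The shift-and-mask arithmetic above is the whole two-level lookup: the top bits of the guest offset pick an L1 slot, the middle bits an L2 slot, and the low bits the byte within the cluster. A worked example with illustrative geometry (4 KiB clusters, 512-entry L2 tables; values assumed, not taken from this diff):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int cluster_bits = 12;             /* 4096-byte clusters */
    int l2_bits = 9;                   /* 512 entries per L2 table */
    uint64_t offset = 0x12345678;      /* guest offset in bytes */

    uint64_t l1_index = offset >> (l2_bits + cluster_bits);
    uint64_t l2_index = (offset >> cluster_bits) & ((1 << l2_bits) - 1);
    uint64_t in_cluster = offset & ((1 << cluster_bits) - 1);

    printf("L1 %llu, L2 %llu, byte-in-cluster %llu\n",
           (unsigned long long)l1_index,
           (unsigned long long)l2_index,
           (unsigned long long)in_cluster);
    return 0;
}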
@@ -276,7 +276,7 @@
         /* update the L1 entry */
         s->l1_table[l1_index] = l2_offset;
         tmp = cpu_to_be64(l2_offset);
-        if (bdrv_pwrite(s->hd, s->l1_table_offset + l1_index * sizeof(tmp), 
+        if (bdrv_pwrite(s->hd, s->l1_table_offset + l1_index * sizeof(tmp),
                         &tmp, sizeof(tmp)) != sizeof(tmp))
             return 0;
         new_l2_table = 1;
@@ -309,7 +309,7 @@
             s->l2_size * sizeof(uint64_t))
             return 0;
     } else {
-        if (bdrv_pread(s->hd, l2_offset, l2_table, s->l2_size * sizeof(uint64_t)) != 
+        if (bdrv_pread(s->hd, l2_offset, l2_table, s->l2_size * sizeof(uint64_t)) !=
            s->l2_size * sizeof(uint64_t))
            return 0;
     }
@@ -318,7 +318,7 @@
  found:
     l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
     cluster_offset = be64_to_cpu(l2_table[l2_index]);
-    if (!cluster_offset || 
+    if (!cluster_offset ||
         ((cluster_offset & QCOW_OFLAG_COMPRESSED) && allocate == 1)) {
         if (!allocate)
             return 0;
@@ -331,54 +331,54 @@
             if (decompress_cluster(s, cluster_offset) < 0)
                 return 0;
             cluster_offset = bdrv_getlength(s->hd);
-            cluster_offset = (cluster_offset + s->cluster_size - 1) & 
+            cluster_offset = (cluster_offset + s->cluster_size - 1) &
                 ~(s->cluster_size - 1);
             /* write the cluster content */
-            if (bdrv_pwrite(s->hd, cluster_offset, s->cluster_cache, s->cluster_size) != 
+            if (bdrv_pwrite(s->hd, cluster_offset, s->cluster_cache, s->cluster_size) !=
                 s->cluster_size)
                 return -1;
         } else {
             cluster_offset = bdrv_getlength(s->hd);
             if (allocate == 1) {
                 /* round to cluster size */
-                cluster_offset = (cluster_offset + s->cluster_size - 1) & 
+                cluster_offset = (cluster_offset + s->cluster_size - 1) &
                     ~(s->cluster_size - 1);
                 bdrv_truncate(s->hd, cluster_offset + s->cluster_size);
                 /* if encrypted, we must initialize the cluster
                    content which won't be written */
-                if (s->crypt_method && 
+                if (s->crypt_method &&
                     (n_end - n_start) < s->cluster_sectors) {
                     uint64_t start_sect;
                     start_sect = (offset & ~(s->cluster_size - 1)) >> 9;
                     memset(s->cluster_data + 512, 0x00, 512);
                     for(i = 0; i < s->cluster_sectors; i++) {
                         if (i < n_start || i >= n_end) {
-                            encrypt_sectors(s, start_sect + i, 
-                                            s->cluster_data, 
+                            encrypt_sectors(s, start_sect + i,
+                                            s->cluster_data,
                                             s->cluster_data + 512, 1, 1,
                                             &s->aes_encrypt_key);
-                            if (bdrv_pwrite(s->hd, cluster_offset + i * 512, 
+                            if (bdrv_pwrite(s->hd, cluster_offset + i * 512,
                                             s->cluster_data, 512) != 512)
                                 return -1;
                         }
                     }
                 }
             } else {
-                cluster_offset |= QCOW_OFLAG_COMPRESSED | 
+                cluster_offset |= QCOW_OFLAG_COMPRESSED |
                     (uint64_t)compressed_size << (63 - s->cluster_bits);
             }
         }
         /* update L2 table */
         tmp = cpu_to_be64(cluster_offset);
         l2_table[l2_index] = tmp;
-        if (bdrv_pwrite(s->hd, 
+        if (bdrv_pwrite(s->hd,
                         l2_offset + l2_index * sizeof(tmp), &tmp, sizeof(tmp)) != sizeof(tmp))
             return 0;
     }
     return cluster_offset;
 }
 
-static int qcow_is_allocated(BlockDriverState *bs, int64_t sector_num, 
+static int qcow_is_allocated(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors, int *pnum)
 {
     BDRVQcowState *s = bs->opaque;
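The crypt_method loop above guards encrypted images: when a newly allocated cluster is only partially covered by the write, every sector outside [n_start, n_end) is pre-filled with encrypted zeros so the file never exposes unencrypted garbage. A toy model of which sectors get that treatment (values illustrative):

#include <stdio.h>

int main(void)
{
    int cluster_sectors = 8;       /* 4 KiB cluster / 512-byte sectors */
    int n_start = 2, n_end = 5;    /* the write covers sectors 2..4 */

    for (int i = 0; i < cluster_sectors; i++) {
        if (i < n_start || i >= n_end)
            printf("sector %d: pre-filled with encrypted zeros\n", i);
        else
            printf("sector %d: written by the caller\n", i);
    }
    return 0;
}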
@@ -420,7 +420,7 @@
     inflateEnd(strm);
     return 0;
 }
- 
+
 static int decompress_cluster(BDRVQcowState *s, uint64_t cluster_offset)
 {
     int ret, csize;
@@ -431,7 +431,7 @@
     csize = cluster_offset >> (63 - s->cluster_bits);
     csize &= (s->cluster_size - 1);
     ret = bdrv_pread(s->hd, coffset, s->cluster_data, csize);
-    if (ret != csize) 
+    if (ret != csize)
         return -1;
     if (decompress_buffer(s->cluster_cache, s->cluster_size,
                           s->cluster_data, csize) < 0) {
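decompress_cluster() recovers the compressed byte count from the high bits of the L2 entry; the allocate == 2 path of get_cluster_offset() (earlier in this diff) packs it there, above the offset, alongside the compressed flag. A worked round-trip under assumed values (cluster_bits = 12, QCOW_OFLAG_COMPRESSED taken as bit 63):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int cluster_bits = 12;
    uint64_t cluster_size = 1 << cluster_bits;
    uint64_t cluster_offset_mask = (1ULL << (63 - cluster_bits)) - 1;

    uint64_t coffset = 0x10000;    /* where the compressed bytes live */
    uint64_t csize   = 1000;       /* compressed length, < cluster_size */

    /* pack, as get_cluster_offset() does for allocate == 2 */
    uint64_t desc = coffset
                    | (1ULL << 63)                      /* compressed flag */
                    | (csize << (63 - cluster_bits));

    /* unpack, as decompress_cluster() does */
    uint64_t got_csize = (desc >> (63 - cluster_bits)) & (cluster_size - 1);
    uint64_t got_off   = desc & cluster_offset_mask;
    printf("csize=%llu offset=0x%llx\n",
           (unsigned long long)got_csize, (unsigned long long)got_off);
    return 0;
}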
@@ -444,13 +444,13 @@
 
 #if 0
 
-static int qcow_read(BlockDriverState *bs, int64_t sector_num, 
+static int qcow_read(BlockDriverState *bs, int64_t sector_num,
                      uint8_t *buf, int nb_sectors)
 {
     BDRVQcowState *s = bs->opaque;
     int ret, index_in_cluster, n;
     uint64_t cluster_offset;
-    
+
     while (nb_sectors > 0) {
         cluster_offset = get_cluster_offset(bs, sector_num << 9, 0, 0, 0, 0);
         index_in_cluster = sector_num & (s->cluster_sectors - 1);
@@ -472,10 +472,10 @@
             memcpy(buf, s->cluster_cache + index_in_cluster * 512, 512 * n);
         } else {
             ret = bdrv_pread(s->hd, cluster_offset + index_in_cluster * 512, buf, n * 512);
-            if (ret != n * 512) 
+            if (ret != n * 512)
                 return -1;
             if (s->crypt_method) {
-                encrypt_sectors(s, sector_num, buf, buf, n, 0, 
+                encrypt_sectors(s, sector_num, buf, buf, n, 0,
                                 &s->aes_decrypt_key);
             }
         }
@@ -487,32 +487,32 @@
 }
 #endif
 
-static int qcow_write(BlockDriverState *bs, int64_t sector_num, 
+static int qcow_write(BlockDriverState *bs, int64_t sector_num,
                       const uint8_t *buf, int nb_sectors)
 {
     BDRVQcowState *s = bs->opaque;
     int ret, index_in_cluster, n;
     uint64_t cluster_offset;
-    
+
     while (nb_sectors > 0) {
         index_in_cluster = sector_num & (s->cluster_sectors - 1);
         n = s->cluster_sectors - index_in_cluster;
         if (n > nb_sectors)
             n = nb_sectors;
-        cluster_offset = get_cluster_offset(bs, sector_num << 9, 1, 0, 
-                                            index_in_cluster, 
+        cluster_offset = get_cluster_offset(bs, sector_num << 9, 1, 0,
+                                            index_in_cluster,
                                             index_in_cluster + n);
         if (!cluster_offset)
             return -1;
         if (s->crypt_method) {
             encrypt_sectors(s, sector_num, s->cluster_data, buf, n, 1,
                             &s->aes_encrypt_key);
-            ret = bdrv_pwrite(s->hd, cluster_offset + index_in_cluster * 512, 
+            ret = bdrv_pwrite(s->hd, cluster_offset + index_in_cluster * 512,
                               s->cluster_data, n * 512);
         } else {
             ret = bdrv_pwrite(s->hd, cluster_offset + index_in_cluster * 512, buf, n * 512);
         }
-        if (ret != n * 512) 
+        if (ret != n * 512)
             return -1;
         nb_sectors -= n;
         sector_num += n;
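Like qcow_read() above it, qcow_write() walks the request one cluster at a time: index_in_cluster is the start position inside the current cluster and n is clipped so an iteration never crosses a cluster boundary. The splitting arithmetic in isolation (illustrative geometry):

#include <stdio.h>

int main(void)
{
    int cluster_sectors = 8;        /* 4 KiB clusters, 512-byte sectors */
    long long sector_num = 5;       /* request starts mid-cluster */
    int nb_sectors = 20;

    while (nb_sectors > 0) {
        int index_in_cluster = sector_num & (cluster_sectors - 1);
        int n = cluster_sectors - index_in_cluster;
        if (n > nb_sectors)
            n = nb_sectors;
        printf("sectors %lld..%lld (%d) handled in one cluster\n",
               sector_num, sector_num + n - 1, n);
        nb_sectors -= n;
        sector_num += n;
    }
    return 0;
}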
@@ -529,7 +529,7 @@
     int nb_sectors;
     int n;
     uint64_t cluster_offset;
-    uint8_t *cluster_data; 
+    uint8_t *cluster_data;
     BlockDriverAIOCB *hd_aiocb;
 } QCowAIOCB;
 
@@ -556,8 +556,8 @@
         /* nothing to do */
     } else {
         if (s->crypt_method) {
-            encrypt_sectors(s, acb->sector_num, acb->buf, acb->buf, 
-                            acb->n, 0, 
+            encrypt_sectors(s, acb->sector_num, acb->buf, acb->buf,
+                            acb->n, 0,
                             &s->aes_decrypt_key);
         }
     }
@@ -572,9 +572,9 @@
         qemu_aio_release(acb);
         return;
     }
-    
+
     /* prepare next AIO request */
-    acb->cluster_offset = get_cluster_offset(bs, acb->sector_num << 9, 
+    acb->cluster_offset = get_cluster_offset(bs, acb->sector_num << 9,
                                              0, 0, 0, 0);
     index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
     acb->n = s->cluster_sectors - index_in_cluster;
@@ -597,7 +597,7 @@
         /* add AIO support for compressed blocks ? */
         if (decompress_cluster(s, acb->cluster_offset) < 0)
             goto fail;
-        memcpy(acb->buf, 
+        memcpy(acb->buf,
                s->cluster_cache + index_in_cluster * 512, 512 * acb->n);
         goto redo;
     } else {
@@ -606,7 +606,7 @@
             goto fail;
         }
         acb->hd_aiocb = bdrv_aio_read(s->hd,
-                                      (acb->cluster_offset >> 9) + index_in_cluster, 
+                                      (acb->cluster_offset >> 9) + index_in_cluster,
                                       acb->buf, acb->n, qcow_aio_read_cb, acb);
         if (acb->hd_aiocb == NULL)
             goto fail;
@@ -627,7 +627,7 @@
     acb->buf = buf;
     acb->nb_sectors = nb_sectors;
     acb->n = 0;
-    acb->cluster_offset = 0; 
+    acb->cluster_offset = 0;
 
     qcow_aio_read_cb(acb, 0);
     return &acb->common;
@@ -661,13 +661,13 @@
         qemu_aio_release(acb);
         return;
     }
-    
+
     index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
     acb->n = s->cluster_sectors - index_in_cluster;
     if (acb->n > acb->nb_sectors)
         acb->n = acb->nb_sectors;
-    cluster_offset = get_cluster_offset(bs, acb->sector_num << 9, 1, 0, 
-                                        index_in_cluster, 
+    cluster_offset = get_cluster_offset(bs, acb->sector_num << 9, 1, 0,
+                                        index_in_cluster,
                                         index_in_cluster + acb->n);
     if (!cluster_offset || (cluster_offset & 511) != 0) {
         ret = -EIO;
@@ -681,15 +681,15 @@
                 goto fail;
             }
         }
-        encrypt_sectors(s, acb->sector_num, acb->cluster_data, acb->buf, 
+        encrypt_sectors(s, acb->sector_num, acb->cluster_data, acb->buf,
                         acb->n, 1, &s->aes_encrypt_key);
         src_buf = acb->cluster_data;
     } else {
         src_buf = acb->buf;
    }
     acb->hd_aiocb = bdrv_aio_write(s->hd,
-                                   (cluster_offset >> 9) + index_in_cluster, 
-                                   src_buf, acb->n, 
+                                   (cluster_offset >> 9) + index_in_cluster,
+                                   src_buf, acb->n,
                                    qcow_aio_write_cb, acb);
     if (acb->hd_aiocb == NULL)
         goto fail;
@@ -701,7 +701,7 @@
 {
     BDRVQcowState *s = bs->opaque;
     QCowAIOCB *acb;
-    
+
     s->cluster_cache_offset = -1; /* disable compressed cache */
 
     acb = qemu_aio_get(bs, cb, opaque);
@@ -712,7 +712,7 @@
     acb->buf = (uint8_t *)buf;
     acb->nb_sectors = nb_sectors;
     acb->n = 0;
-    
+
     qcow_aio_write_cb(acb, 0);
     return &acb->common;
 }
@@ -774,7 +774,7 @@
     } else {
         header.crypt_method = cpu_to_be32(QCOW_CRYPT_NONE);
     }
-    
+
     /* write all the data */
     write(fd, &header, sizeof(header));
     if (backing_file) {
@@ -811,7 +811,7 @@
 
 /* XXX: put compressed sectors first, then all the cluster aligned
    tables to avoid losing bytes in alignment */
-static int qcow_write_compressed(BlockDriverState *bs, int64_t sector_num, 
+static int qcow_write_compressed(BlockDriverState *bs, int64_t sector_num,
                                  const uint8_t *buf, int nb_sectors)
 {
     BDRVQcowState *s = bs->opaque;
@@ -830,7 +830,7 @@
     /* best compression, small window, no zlib header */
     memset(&strm, 0, sizeof(strm));
     ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION,
-                       Z_DEFLATED, -12, 
+                       Z_DEFLATED, -12,
                        9, Z_DEFAULT_STRATEGY);
     if (ret != 0) {
         qemu_free(out_buf);
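The parameters match the comment: a negative windowBits (-12) asks zlib for raw deflate output with a 4 KiB window, so no zlib header or checksum wastes cluster space, and memLevel 9 spends memory for better ratios. A self-contained sketch of the same configuration (compress_cluster_demo is a hypothetical helper, not from the driver):

#include <string.h>
#include <zlib.h>

/* Returns the compressed length, or -1 if compression failed or the
 * result did not fit in out_size bytes (i.e. the data didn't shrink). */
static int compress_cluster_demo(unsigned char *out, int out_size,
                                 const unsigned char *in, int in_size)
{
    z_stream strm;
    memset(&strm, 0, sizeof(strm));
    if (deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
                     -12, 9, Z_DEFAULT_STRATEGY) != Z_OK)
        return -1;
    strm.next_in = (unsigned char *)in;
    strm.avail_in = in_size;
    strm.next_out = out;
    strm.avail_out = out_size;
    int ret = deflate(&strm, Z_FINISH);   /* Z_STREAM_END when complete */
    int out_len = out_size - strm.avail_out;
    deflateEnd(&strm);
    return ret == Z_STREAM_END ? out_len : -1;
}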
@@ -856,7 +856,7 @@
         /* could not compress: write normal cluster */
         qcow_write(bs, sector_num, buf, s->cluster_sectors);
     } else {
-        cluster_offset = get_cluster_offset(bs, sector_num << 9, 2, 
+        cluster_offset = get_cluster_offset(bs, sector_num << 9, 2,
                                             out_len, 0, 0);
         cluster_offset &= s->cluster_offset_mask;
         if (bdrv_pwrite(s->hd, cluster_offset, out_buf, out_len) != out_len) {
@@ -864,7 +864,7 @@
             return -1;
         }
     }
-    
+
     qemu_free(out_buf);
     return 0;
 }