/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu-common.h"
#include "block_int.h"
#include "module.h"
#include <zlib.h>
#include "aes.h"
#include "block/qcow2.h"
#include "qemu-error.h"
#include "qerror.h"

/*
  Differences with QCOW:

  - Support for multiple incremental snapshots.
  - Memory management by reference counts.
  - Clusters which have a reference count of one have the bit
    QCOW_OFLAG_COPIED to optimize write performance.
  - Size of compressed clusters is stored in sectors to reduce bit usage
    in the cluster offsets.
  - Support for storing additional data (such as the VM state) in the
    snapshots.
  - If a backing store is used, the cluster size is not constrained
    (could be backported to QCOW).
  - L2 tables always have a size of one cluster.
*/


typedef struct {
    uint32_t magic;
    uint32_t len;
} QCowExtension;
#define  QCOW2_EXT_MAGIC_END 0
#define  QCOW2_EXT_MAGIC_BACKING_FORMAT 0xE2792ACA
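/*
 * Each header extension is stored on disk as a big-endian magic/length pair
 * (the QCowExtension layout above) followed by 'len' bytes of data; the next
 * entry starts at the following 8-byte boundary. An entry with magic 0 ends
 * the list. See qcow2_read_extensions() below.
 */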

static int qcow2_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    const QCowHeader *cow_header = (const void *)buf;

    if (buf_size >= sizeof(QCowHeader) &&
        be32_to_cpu(cow_header->magic) == QCOW_MAGIC &&
        be32_to_cpu(cow_header->version) >= QCOW_VERSION)
        return 100;
    else
        return 0;
}


/*
 * read qcow2 extension and fill bs
 * start reading from start_offset
 * finish reading upon magic of value 0 or when end_offset reached
 * unknown magic is skipped (future extension this version knows nothing about)
 * return 0 upon success, non-0 otherwise
 */
static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
                                 uint64_t end_offset)
{
    QCowExtension ext;
    uint64_t offset;

#ifdef DEBUG_EXT
    printf("qcow2_read_extensions: start=%ld end=%ld\n", start_offset, end_offset);
#endif
    offset = start_offset;
    while (offset < end_offset) {

#ifdef DEBUG_EXT
        BDRVQcowState *s = bs->opaque;
        /* Sanity check */
        if (offset > s->cluster_size)
            printf("qcow2_read_extension: suspicious offset %lu\n", offset);

        printf("attempting to read extended header at offset %lu\n", offset);
#endif

        if (bdrv_pread(bs->file, offset, &ext, sizeof(ext)) != sizeof(ext)) {
            fprintf(stderr, "qcow2_read_extension: ERROR: "
                    "pread fail from offset %" PRIu64 "\n",
                    offset);
            return 1;
        }
        be32_to_cpus(&ext.magic);
        be32_to_cpus(&ext.len);
        offset += sizeof(ext);
#ifdef DEBUG_EXT
        printf("ext.magic = 0x%x\n", ext.magic);
#endif
        switch (ext.magic) {
        case QCOW2_EXT_MAGIC_END:
            return 0;

        case QCOW2_EXT_MAGIC_BACKING_FORMAT:
            if (ext.len >= sizeof(bs->backing_format)) {
                fprintf(stderr, "ERROR: ext_backing_format: len=%u too large"
                        " (>=%zu)\n",
                        ext.len, sizeof(bs->backing_format));
                return 2;
            }
            if (bdrv_pread(bs->file, offset, bs->backing_format,
                           ext.len) != ext.len)
                return 3;
            bs->backing_format[ext.len] = '\0';
#ifdef DEBUG_EXT
            printf("Qcow2: Got format extension %s\n", bs->backing_format);
#endif
            offset = ((offset + ext.len + 7) & ~7);
            break;

        default:
            /* unknown magic -- just skip it */
            offset = ((offset + ext.len + 7) & ~7);
            break;
        }
    }

    return 0;
}


static int qcow2_open(BlockDriverState *bs, int flags)
{
    BDRVQcowState *s = bs->opaque;
    int len, i, ret = 0;
    QCowHeader header;
    uint64_t ext_end;
    bool writethrough;

    ret = bdrv_pread(bs->file, 0, &header, sizeof(header));
    if (ret < 0) {
        goto fail;
    }
    be32_to_cpus(&header.magic);
    be32_to_cpus(&header.version);
    be64_to_cpus(&header.backing_file_offset);
    be32_to_cpus(&header.backing_file_size);
    be64_to_cpus(&header.size);
    be32_to_cpus(&header.cluster_bits);
    be32_to_cpus(&header.crypt_method);
    be64_to_cpus(&header.l1_table_offset);
    be32_to_cpus(&header.l1_size);
    be64_to_cpus(&header.refcount_table_offset);
    be32_to_cpus(&header.refcount_table_clusters);
    be64_to_cpus(&header.snapshots_offset);
    be32_to_cpus(&header.nb_snapshots);

    if (header.magic != QCOW_MAGIC) {
        ret = -EINVAL;
        goto fail;
    }
    if (header.version != QCOW_VERSION) {
        char version[64];
        snprintf(version, sizeof(version), "QCOW version %d", header.version);
        qerror_report(QERR_UNKNOWN_BLOCK_FORMAT_FEATURE,
            bs->device_name, "qcow2", version);
        ret = -ENOTSUP;
        goto fail;
    }
    if (header.cluster_bits < MIN_CLUSTER_BITS ||
        header.cluster_bits > MAX_CLUSTER_BITS) {
        ret = -EINVAL;
        goto fail;
    }
    if (header.crypt_method > QCOW_CRYPT_AES) {
        ret = -EINVAL;
        goto fail;
    }
    s->crypt_method_header = header.crypt_method;
    if (s->crypt_method_header) {
        bs->encrypted = 1;
    }
    s->cluster_bits = header.cluster_bits;
    s->cluster_size = 1 << s->cluster_bits;
    s->cluster_sectors = 1 << (s->cluster_bits - 9);
    s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */
    s->l2_size = 1 << s->l2_bits;
    bs->total_sectors = header.size / 512;
    s->csize_shift = (62 - (s->cluster_bits - 8));
    s->csize_mask = (1 << (s->cluster_bits - 8)) - 1;
    s->cluster_offset_mask = (1LL << s->csize_shift) - 1;
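    /*
     * Example: with the common 64 KiB cluster size (cluster_bits = 16) the
     * formulas above give cluster_size = 65536, cluster_sectors = 128,
     * l2_size = 8192 entries, csize_shift = 54 and csize_mask = 0xff; the
     * csize fields describe how the compressed cluster size (in 512-byte
     * sectors) is packed into the upper bits of its L2 entry.
     */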
    s->refcount_table_offset = header.refcount_table_offset;
    s->refcount_table_size =
        header.refcount_table_clusters << (s->cluster_bits - 3);

    s->snapshots_offset = header.snapshots_offset;
    s->nb_snapshots = header.nb_snapshots;

    /* read the level 1 table */
    s->l1_size = header.l1_size;
    s->l1_vm_state_index = size_to_l1(s, header.size);
    /* the L1 table must contain at least enough entries to put
       header.size bytes */
    if (s->l1_size < s->l1_vm_state_index) {
        ret = -EINVAL;
        goto fail;
    }
    s->l1_table_offset = header.l1_table_offset;
    if (s->l1_size > 0) {
        s->l1_table = g_malloc0(
            align_offset(s->l1_size * sizeof(uint64_t), 512));
        ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table,
                         s->l1_size * sizeof(uint64_t));
        if (ret < 0) {
            goto fail;
        }
        for(i = 0;i < s->l1_size; i++) {
            be64_to_cpus(&s->l1_table[i]);
        }
    }

    /* alloc L2 table/refcount block cache */
    writethrough = ((flags & BDRV_O_CACHE_WB) == 0);
    s->l2_table_cache = qcow2_cache_create(bs, L2_CACHE_SIZE, writethrough);
    s->refcount_block_cache = qcow2_cache_create(bs, REFCOUNT_CACHE_SIZE,
        writethrough);

    s->cluster_cache = g_malloc(s->cluster_size);
    /* one more sector for decompressed data alignment */
    s->cluster_data = g_malloc(QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size
                                  + 512);
    s->cluster_cache_offset = -1;

    ret = qcow2_refcount_init(bs);
    if (ret != 0) {
        goto fail;
    }

    QLIST_INIT(&s->cluster_allocs);

    /* read qcow2 extensions */
    if (header.backing_file_offset) {
        ext_end = header.backing_file_offset;
    } else {
        ext_end = s->cluster_size;
    }
    if (qcow2_read_extensions(bs, sizeof(header), ext_end)) {
        ret = -EINVAL;
        goto fail;
    }

    /* read the backing file name */
    if (header.backing_file_offset != 0) {
        len = header.backing_file_size;
        if (len > 1023) {
            len = 1023;
        }
        ret = bdrv_pread(bs->file, header.backing_file_offset,
                         bs->backing_file, len);
        if (ret < 0) {
            goto fail;
        }
        bs->backing_file[len] = '\0';
    }
    if (qcow2_read_snapshots(bs) < 0) {
        ret = -EINVAL;
        goto fail;
    }

    /* Initialise locks */
    qemu_co_mutex_init(&s->lock);

#ifdef DEBUG_ALLOC
    {
        BdrvCheckResult result = {0};
        qcow2_check_refcounts(bs, &result);
    }
#endif
    return ret;

 fail:
    qcow2_free_snapshots(bs);
    qcow2_refcount_close(bs);
    g_free(s->l1_table);
    if (s->l2_table_cache) {
        qcow2_cache_destroy(bs, s->l2_table_cache);
    }
    g_free(s->cluster_cache);
    g_free(s->cluster_data);
    return ret;
}

static int qcow2_set_key(BlockDriverState *bs, const char *key)
{
    BDRVQcowState *s = bs->opaque;
    uint8_t keybuf[16];
    int len, i;

    memset(keybuf, 0, 16);
    len = strlen(key);
    if (len > 16)
        len = 16;
    /* XXX: we could compress the chars to 7 bits to increase
       entropy */
    for(i = 0;i < len;i++) {
        keybuf[i] = key[i];
    }
    s->crypt_method = s->crypt_method_header;

    if (AES_set_encrypt_key(keybuf, 128, &s->aes_encrypt_key) != 0)
        return -1;
    if (AES_set_decrypt_key(keybuf, 128, &s->aes_decrypt_key) != 0)
        return -1;
#if 0
    /* test */
    {
        uint8_t in[16];
        uint8_t out[16];
        uint8_t tmp[16];
        for(i=0;i<16;i++)
            in[i] = i;
        AES_encrypt(in, tmp, &s->aes_encrypt_key);
        AES_decrypt(tmp, out, &s->aes_decrypt_key);
        for(i = 0; i < 16; i++)
            printf(" %02x", tmp[i]);
        printf("\n");
        for(i = 0; i < 16; i++)
            printf(" %02x", out[i]);
        printf("\n");
    }
#endif
    return 0;
}

static int qcow2_is_allocated(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors, int *pnum)
{
    uint64_t cluster_offset;
    int ret;

    *pnum = nb_sectors;
    /* FIXME We can get errors here, but the bdrv_is_allocated interface can't
     * pass them on today */
    ret = qcow2_get_cluster_offset(bs, sector_num << 9, pnum, &cluster_offset);
    if (ret < 0) {
        *pnum = 0;
    }

    return (cluster_offset != 0);
}

/* handle reading after the end of the backing file */
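/*
 * Sectors of the request that lie beyond the end of the backing file are
 * zero-filled here; the return value is the number of sectors that still
 * have to be read from the backing file itself.
 */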
363
int qcow2_backing_read1(BlockDriverState *bs, QEMUIOVector *qiov,
364
                  int64_t sector_num, int nb_sectors)
365
{
366
    int n1;
367
    if ((sector_num + nb_sectors) <= bs->total_sectors)
368
        return nb_sectors;
369
    if (sector_num >= bs->total_sectors)
370
        n1 = 0;
371
    else
372
        n1 = bs->total_sectors - sector_num;
373

    
374
    qemu_iovec_memset_skip(qiov, 0, 512 * (nb_sectors - n1), 512 * n1);
375

    
376
    return n1;
377
}
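
/*
 * Read path: for each chunk of the request the cluster mapping is looked up
 * with qcow2_get_cluster_offset(). Unallocated clusters are served from the
 * backing file (or zero-filled), compressed clusters go through the
 * decompression cache, and normal clusters are read from bs->file directly,
 * with optional AES decryption through a bounce buffer.
 */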
static int qcow2_co_readv(BlockDriverState *bs, int64_t sector_num,
                          int remaining_sectors, QEMUIOVector *qiov)
{
    BDRVQcowState *s = bs->opaque;
    int index_in_cluster, n1;
    int ret;
    int cur_nr_sectors; /* number of sectors in current iteration */
    uint64_t cluster_offset = 0;
    uint64_t bytes_done = 0;
    QEMUIOVector hd_qiov;
    uint8_t *cluster_data = NULL;

    qemu_iovec_init(&hd_qiov, qiov->niov);

    qemu_co_mutex_lock(&s->lock);

    while (remaining_sectors != 0) {

        /* prepare next request */
        cur_nr_sectors = remaining_sectors;
        if (s->crypt_method) {
            cur_nr_sectors = MIN(cur_nr_sectors,
                QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors);
        }

        ret = qcow2_get_cluster_offset(bs, sector_num << 9,
            &cur_nr_sectors, &cluster_offset);
        if (ret < 0) {
            goto fail;
        }

        index_in_cluster = sector_num & (s->cluster_sectors - 1);

        qemu_iovec_reset(&hd_qiov);
        qemu_iovec_copy(&hd_qiov, qiov, bytes_done,
            cur_nr_sectors * 512);

        if (!cluster_offset) {

            if (bs->backing_hd) {
                /* read from the base image */
                n1 = qcow2_backing_read1(bs->backing_hd, &hd_qiov,
                    sector_num, cur_nr_sectors);
                if (n1 > 0) {
                    BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
                    qemu_co_mutex_unlock(&s->lock);
                    ret = bdrv_co_readv(bs->backing_hd, sector_num,
                                        n1, &hd_qiov);
                    qemu_co_mutex_lock(&s->lock);
                    if (ret < 0) {
                        goto fail;
                    }
                }
            } else {
                /* Note: in this case, no need to wait */
                qemu_iovec_memset(&hd_qiov, 0, 512 * cur_nr_sectors);
            }
        } else if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
            /* add AIO support for compressed blocks ? */
            ret = qcow2_decompress_cluster(bs, cluster_offset);
            if (ret < 0) {
                goto fail;
            }

            qemu_iovec_from_buffer(&hd_qiov,
                s->cluster_cache + index_in_cluster * 512,
                512 * cur_nr_sectors);
        } else {
            if ((cluster_offset & 511) != 0) {
                ret = -EIO;
                goto fail;
            }

            if (s->crypt_method) {
                /*
                 * For encrypted images, read everything into a temporary
                 * contiguous buffer on which the AES functions can work.
                 */
                if (!cluster_data) {
                    cluster_data =
                        g_malloc0(QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
                }

                assert(cur_nr_sectors <=
                    QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors);
                qemu_iovec_reset(&hd_qiov);
                qemu_iovec_add(&hd_qiov, cluster_data,
                    512 * cur_nr_sectors);
            }

            BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
            qemu_co_mutex_unlock(&s->lock);
            ret = bdrv_co_readv(bs->file,
                                (cluster_offset >> 9) + index_in_cluster,
                                cur_nr_sectors, &hd_qiov);
            qemu_co_mutex_lock(&s->lock);
            if (ret < 0) {
                goto fail;
            }
            if (s->crypt_method) {
                qcow2_encrypt_sectors(s, sector_num, cluster_data,
                    cluster_data, cur_nr_sectors, 0, &s->aes_decrypt_key);
                qemu_iovec_reset(&hd_qiov);
                qemu_iovec_copy(&hd_qiov, qiov, bytes_done,
                    cur_nr_sectors * 512);
                qemu_iovec_from_buffer(&hd_qiov, cluster_data,
                    512 * cur_nr_sectors);
            }
        }

        remaining_sectors -= cur_nr_sectors;
        sector_num += cur_nr_sectors;
        bytes_done += cur_nr_sectors * 512;
    }
    ret = 0;

fail:
    qemu_co_mutex_unlock(&s->lock);

    qemu_iovec_destroy(&hd_qiov);
    g_free(cluster_data);

    return ret;
}

static void run_dependent_requests(BDRVQcowState *s, QCowL2Meta *m)
{
    /* Take the request off the list of running requests */
    if (m->nb_clusters != 0) {
        QLIST_REMOVE(m, next_in_flight);
    }

    /* Restart all dependent requests */
    if (!qemu_co_queue_empty(&m->dependent_requests)) {
        qemu_co_mutex_unlock(&s->lock);
        while(qemu_co_queue_next(&m->dependent_requests));
        qemu_co_mutex_lock(&s->lock);
    }
}
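
/*
 * Write path: clusters are allocated (or located for copy-on-write) with
 * qcow2_alloc_cluster_offset(), the guest data is written to bs->file
 * (encrypted through a bounce buffer if needed), and only then are the new
 * clusters linked into the L2 table by qcow2_alloc_cluster_link_l2().
 * Requests that were waiting on this allocation are restarted afterwards.
 */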
static int qcow2_co_writev(BlockDriverState *bs,
                           int64_t sector_num,
                           int remaining_sectors,
                           QEMUIOVector *qiov)
{
    BDRVQcowState *s = bs->opaque;
    int index_in_cluster;
    int n_end;
    int ret;
    int cur_nr_sectors; /* number of sectors in current iteration */
    QCowL2Meta l2meta;
    uint64_t cluster_offset;
    QEMUIOVector hd_qiov;
    uint64_t bytes_done = 0;
    uint8_t *cluster_data = NULL;

    l2meta.nb_clusters = 0;
    qemu_co_queue_init(&l2meta.dependent_requests);

    qemu_iovec_init(&hd_qiov, qiov->niov);

    s->cluster_cache_offset = -1; /* disable compressed cache */

    qemu_co_mutex_lock(&s->lock);

    while (remaining_sectors != 0) {

        index_in_cluster = sector_num & (s->cluster_sectors - 1);
        n_end = index_in_cluster + remaining_sectors;
        if (s->crypt_method &&
            n_end > QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors) {
            n_end = QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors;
        }

        ret = qcow2_alloc_cluster_offset(bs, sector_num << 9,
            index_in_cluster, n_end, &cur_nr_sectors, &l2meta);
        if (ret < 0) {
            goto fail;
        }

        cluster_offset = l2meta.cluster_offset;
        assert((cluster_offset & 511) == 0);

        qemu_iovec_reset(&hd_qiov);
        qemu_iovec_copy(&hd_qiov, qiov, bytes_done,
            cur_nr_sectors * 512);

        if (s->crypt_method) {
            if (!cluster_data) {
                cluster_data = g_malloc0(QCOW_MAX_CRYPT_CLUSTERS *
                                                 s->cluster_size);
            }

            assert(hd_qiov.size <=
                   QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
            qemu_iovec_to_buffer(&hd_qiov, cluster_data);

            qcow2_encrypt_sectors(s, sector_num, cluster_data,
                cluster_data, cur_nr_sectors, 1, &s->aes_encrypt_key);

            qemu_iovec_reset(&hd_qiov);
            qemu_iovec_add(&hd_qiov, cluster_data,
                cur_nr_sectors * 512);
        }

        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
        qemu_co_mutex_unlock(&s->lock);
        ret = bdrv_co_writev(bs->file,
                             (cluster_offset >> 9) + index_in_cluster,
                             cur_nr_sectors, &hd_qiov);
        qemu_co_mutex_lock(&s->lock);
        if (ret < 0) {
            goto fail;
        }

        ret = qcow2_alloc_cluster_link_l2(bs, &l2meta);

        run_dependent_requests(s, &l2meta);

        if (ret < 0) {
            goto fail;
        }

        remaining_sectors -= cur_nr_sectors;
        sector_num += cur_nr_sectors;
        bytes_done += cur_nr_sectors * 512;
    }
    ret = 0;

fail:
    qemu_co_mutex_unlock(&s->lock);

    qemu_iovec_destroy(&hd_qiov);
    g_free(cluster_data);

    return ret;
}

static void qcow2_close(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    g_free(s->l1_table);

    qcow2_cache_flush(bs, s->l2_table_cache);
    qcow2_cache_flush(bs, s->refcount_block_cache);

    qcow2_cache_destroy(bs, s->l2_table_cache);
    qcow2_cache_destroy(bs, s->refcount_block_cache);

    g_free(s->cluster_cache);
    g_free(s->cluster_data);
    qcow2_refcount_close(bs);
}

/*
 * Updates the variable length parts of the qcow2 header, i.e. the backing file
 * name and all extensions. qcow2 was not designed to allow such changes, so if
 * we run out of space (we can only use the first cluster) this function may
 * fail.
 *
 * Returns 0 on success, -errno in error cases.
 */
static int qcow2_update_ext_header(BlockDriverState *bs,
    const char *backing_file, const char *backing_fmt)
{
    size_t backing_file_len = 0;
    size_t backing_fmt_len = 0;
    BDRVQcowState *s = bs->opaque;
    QCowExtension ext_backing_fmt = {0, 0};
    int ret;

    /* Backing file format doesn't make sense without a backing file */
    if (backing_fmt && !backing_file) {
        return -EINVAL;
    }

    /* Prepare the backing file format extension if needed */
    if (backing_fmt) {
        ext_backing_fmt.len = cpu_to_be32(strlen(backing_fmt));
        ext_backing_fmt.magic = cpu_to_be32(QCOW2_EXT_MAGIC_BACKING_FORMAT);
        backing_fmt_len = ((sizeof(ext_backing_fmt)
            + strlen(backing_fmt) + 7) & ~7);
    }

    /* Check if we can fit the new header into the first cluster */
    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    size_t header_size = sizeof(QCowHeader) + backing_file_len
        + backing_fmt_len;

    if (header_size > s->cluster_size) {
        return -ENOSPC;
    }

    /* Rewrite backing file name and qcow2 extensions */
    size_t ext_size = header_size - sizeof(QCowHeader);
    uint8_t buf[ext_size];
    size_t offset = 0;
    size_t backing_file_offset = 0;

    if (backing_file) {
        if (backing_fmt) {
            int padding = backing_fmt_len -
                (sizeof(ext_backing_fmt) + strlen(backing_fmt));

            memcpy(buf + offset, &ext_backing_fmt, sizeof(ext_backing_fmt));
            offset += sizeof(ext_backing_fmt);

            memcpy(buf + offset, backing_fmt, strlen(backing_fmt));
            offset += strlen(backing_fmt);

            memset(buf + offset, 0, padding);
            offset += padding;
        }

        memcpy(buf + offset, backing_file, backing_file_len);
        backing_file_offset = sizeof(QCowHeader) + offset;
    }

    ret = bdrv_pwrite_sync(bs->file, sizeof(QCowHeader), buf, ext_size);
    if (ret < 0) {
        goto fail;
    }

    /* Update header fields */
    uint64_t be_backing_file_offset = cpu_to_be64(backing_file_offset);
    uint32_t be_backing_file_size = cpu_to_be32(backing_file_len);

    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, backing_file_offset),
        &be_backing_file_offset, sizeof(uint64_t));
    if (ret < 0) {
        goto fail;
    }

    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, backing_file_size),
        &be_backing_file_size, sizeof(uint32_t));
    if (ret < 0) {
        goto fail;
    }

    ret = 0;
fail:
    return ret;
}

static int qcow2_change_backing_file(BlockDriverState *bs,
    const char *backing_file, const char *backing_fmt)
{
    return qcow2_update_ext_header(bs, backing_file, backing_fmt);
}
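
/*
 * Allocate metadata for the whole virtual disk (cluster mappings in the
 * L1/L2 tables and the corresponding refcounts) without writing any guest
 * data. Used at image creation time when preallocation=metadata is requested.
 */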
static int preallocate(BlockDriverState *bs)
{
    uint64_t nb_sectors;
    uint64_t offset;
    int num;
    int ret;
    QCowL2Meta meta;

    nb_sectors = bdrv_getlength(bs) >> 9;
    offset = 0;
    qemu_co_queue_init(&meta.dependent_requests);
    meta.cluster_offset = 0;

    while (nb_sectors) {
        num = MIN(nb_sectors, INT_MAX >> 9);
        ret = qcow2_alloc_cluster_offset(bs, offset, 0, num, &num, &meta);
        if (ret < 0) {
            return ret;
        }

        ret = qcow2_alloc_cluster_link_l2(bs, &meta);
        if (ret < 0) {
            qcow2_free_any_clusters(bs, meta.cluster_offset, meta.nb_clusters);
            return ret;
        }

        /* There are no dependent requests, but we need to remove our request
         * from the list of in-flight requests */
        run_dependent_requests(bs->opaque, &meta);

        /* TODO Preallocate data if requested */

        nb_sectors -= num;
        offset += num << 9;
    }

    /*
     * It is expected that the image file is large enough to actually contain
     * all of the allocated clusters (otherwise we get failing reads after
     * EOF). Extend the image to the last allocated sector.
     */
    if (meta.cluster_offset != 0) {
        uint8_t buf[512];
        memset(buf, 0, 512);
        ret = bdrv_write(bs->file, (meta.cluster_offset >> 9) + num - 1, buf, 1);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

static int qcow2_create2(const char *filename, int64_t total_size,
                         const char *backing_file, const char *backing_format,
                         int flags, size_t cluster_size, int prealloc,
                         QEMUOptionParameter *options)
{
    /* Calculate cluster_bits */
    int cluster_bits;
    cluster_bits = ffs(cluster_size) - 1;
    if (cluster_bits < MIN_CLUSTER_BITS || cluster_bits > MAX_CLUSTER_BITS ||
        (1 << cluster_bits) != cluster_size)
    {
        error_report(
            "Cluster size must be a power of two between %d and %dk",
            1 << MIN_CLUSTER_BITS, 1 << (MAX_CLUSTER_BITS - 10));
        return -EINVAL;
    }

    /*
     * Open the image file and write a minimal qcow2 header.
     *
     * We keep things simple and start with a zero-sized image. We also
     * do without refcount blocks or a L1 table for now. We'll fix the
     * inconsistency later.
     *
     * We do need a refcount table because growing the refcount table means
     * allocating two new refcount blocks - the second of which would be at
     * 2 GB for 64k clusters, and we don't want to have a 2 GB initial file
     * size for any qcow2 image.
     */
    BlockDriverState* bs;
    QCowHeader header;
    uint8_t* refcount_table;
    int ret;

    ret = bdrv_create_file(filename, options);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_file_open(&bs, filename, BDRV_O_RDWR);
    if (ret < 0) {
        return ret;
    }

    /* Write the header */
    memset(&header, 0, sizeof(header));
    header.magic = cpu_to_be32(QCOW_MAGIC);
    header.version = cpu_to_be32(QCOW_VERSION);
    header.cluster_bits = cpu_to_be32(cluster_bits);
    header.size = cpu_to_be64(0);
    header.l1_table_offset = cpu_to_be64(0);
    header.l1_size = cpu_to_be32(0);
    header.refcount_table_offset = cpu_to_be64(cluster_size);
    header.refcount_table_clusters = cpu_to_be32(1);

    if (flags & BLOCK_FLAG_ENCRYPT) {
        header.crypt_method = cpu_to_be32(QCOW_CRYPT_AES);
    } else {
        header.crypt_method = cpu_to_be32(QCOW_CRYPT_NONE);
    }

    ret = bdrv_pwrite(bs, 0, &header, sizeof(header));
    if (ret < 0) {
        goto out;
    }

    /* Write an empty refcount table */
    refcount_table = g_malloc0(cluster_size);
    ret = bdrv_pwrite(bs, cluster_size, refcount_table, cluster_size);
    g_free(refcount_table);

    if (ret < 0) {
        goto out;
    }

    bdrv_close(bs);

    /*
     * And now open the image and make it consistent first (i.e. increase the
     * refcount of the cluster that is occupied by the header and the refcount
     * table)
     */
    BlockDriver* drv = bdrv_find_format("qcow2");
    assert(drv != NULL);
    ret = bdrv_open(bs, filename,
        BDRV_O_RDWR | BDRV_O_CACHE_WB | BDRV_O_NO_FLUSH, drv);
    if (ret < 0) {
        goto out;
    }

    ret = qcow2_alloc_clusters(bs, 2 * cluster_size);
    if (ret < 0) {
        goto out;

    } else if (ret != 0) {
        error_report("Huh, first cluster in empty image is already in use?");
        abort();
    }

    /* Okay, now that we have a valid image, let's give it the right size */
    ret = bdrv_truncate(bs, total_size * BDRV_SECTOR_SIZE);
    if (ret < 0) {
        goto out;
    }

    /* Want a backing file? There you go. */
    if (backing_file) {
        ret = bdrv_change_backing_file(bs, backing_file, backing_format);
        if (ret < 0) {
            goto out;
        }
    }

    /* And if we're supposed to preallocate metadata, do that now */
    if (prealloc) {
        ret = preallocate(bs);
        if (ret < 0) {
            goto out;
        }
    }

    ret = 0;
out:
    bdrv_delete(bs);
    return ret;
}

static int qcow2_create(const char *filename, QEMUOptionParameter *options)
{
    const char *backing_file = NULL;
    const char *backing_fmt = NULL;
    uint64_t sectors = 0;
    int flags = 0;
    size_t cluster_size = DEFAULT_CLUSTER_SIZE;
    int prealloc = 0;

    /* Read out options */
    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            sectors = options->value.n / 512;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FILE)) {
            backing_file = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FMT)) {
            backing_fmt = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_ENCRYPT)) {
            flags |= options->value.n ? BLOCK_FLAG_ENCRYPT : 0;
        } else if (!strcmp(options->name, BLOCK_OPT_CLUSTER_SIZE)) {
            if (options->value.n) {
                cluster_size = options->value.n;
            }
        } else if (!strcmp(options->name, BLOCK_OPT_PREALLOC)) {
            if (!options->value.s || !strcmp(options->value.s, "off")) {
                prealloc = 0;
            } else if (!strcmp(options->value.s, "metadata")) {
                prealloc = 1;
            } else {
                fprintf(stderr, "Invalid preallocation mode: '%s'\n",
                    options->value.s);
                return -EINVAL;
            }
        }
        options++;
    }

    if (backing_file && prealloc) {
        fprintf(stderr, "Backing file and preallocation cannot be used at "
            "the same time\n");
        return -EINVAL;
    }

    return qcow2_create2(filename, sectors, backing_file, backing_fmt, flags,
                         cluster_size, prealloc, options);
}

static int qcow2_make_empty(BlockDriverState *bs)
{
#if 0
    /* XXX: not correct */
    BDRVQcowState *s = bs->opaque;
    uint32_t l1_length = s->l1_size * sizeof(uint64_t);
    int ret;

    memset(s->l1_table, 0, l1_length);
    if (bdrv_pwrite(bs->file, s->l1_table_offset, s->l1_table, l1_length) < 0)
        return -1;
    ret = bdrv_truncate(bs->file, s->l1_table_offset + l1_length);
    if (ret < 0)
        return ret;

    l2_cache_reset(bs);
#endif
    return 0;
}

static int qcow2_discard(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors)
{
    return qcow2_discard_clusters(bs, sector_num << BDRV_SECTOR_BITS,
        nb_sectors);
}

static int qcow2_truncate(BlockDriverState *bs, int64_t offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret, new_l1_size;

    if (offset & 511) {
        return -EINVAL;
    }

    /* cannot proceed if image has snapshots */
    if (s->nb_snapshots) {
        return -ENOTSUP;
    }

    /* shrinking is currently not supported */
    if (offset < bs->total_sectors * 512) {
        return -ENOTSUP;
    }

    new_l1_size = size_to_l1(s, offset);
    ret = qcow2_grow_l1_table(bs, new_l1_size, true);
    if (ret < 0) {
        return ret;
    }

    /* write updated header.size */
    offset = cpu_to_be64(offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, size),
                           &offset, sizeof(uint64_t));
    if (ret < 0) {
        return ret;
    }

    s->l1_vm_state_index = new_l1_size;
    return 0;
}

/* XXX: put compressed sectors first, then all the cluster aligned
   tables to avoid losing bytes in alignment */
static int qcow2_write_compressed(BlockDriverState *bs, int64_t sector_num,
                                  const uint8_t *buf, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    z_stream strm;
    int ret, out_len;
    uint8_t *out_buf;
    uint64_t cluster_offset;

    if (nb_sectors == 0) {
        /* align end of file to a sector boundary to ease reading with
           sector based I/Os */
        cluster_offset = bdrv_getlength(bs->file);
        cluster_offset = (cluster_offset + 511) & ~511;
        bdrv_truncate(bs->file, cluster_offset);
        return 0;
    }

    if (nb_sectors != s->cluster_sectors)
        return -EINVAL;

    out_buf = g_malloc(s->cluster_size + (s->cluster_size / 1000) + 128);

    /* best compression, small window, no zlib header */
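    /* (windowBits = -12 selects raw deflate output, i.e. no zlib header,
     *  with a 4 KiB window; memLevel 9 uses maximum memory for speed) */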
    memset(&strm, 0, sizeof(strm));
    ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION,
                       Z_DEFLATED, -12,
                       9, Z_DEFAULT_STRATEGY);
    if (ret != 0) {
        g_free(out_buf);
        return -1;
    }

    strm.avail_in = s->cluster_size;
    strm.next_in = (uint8_t *)buf;
    strm.avail_out = s->cluster_size;
    strm.next_out = out_buf;

    ret = deflate(&strm, Z_FINISH);
    if (ret != Z_STREAM_END && ret != Z_OK) {
        g_free(out_buf);
        deflateEnd(&strm);
        return -1;
    }
    out_len = strm.next_out - out_buf;

    deflateEnd(&strm);

    if (ret != Z_STREAM_END || out_len >= s->cluster_size) {
        /* could not compress: write normal cluster */
        bdrv_write(bs, sector_num, buf, s->cluster_sectors);
    } else {
        cluster_offset = qcow2_alloc_compressed_cluster_offset(bs,
            sector_num << 9, out_len);
        if (!cluster_offset)
            return -1;
        cluster_offset &= s->cluster_offset_mask;
        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED);
        if (bdrv_pwrite(bs->file, cluster_offset, out_buf, out_len) != out_len) {
            g_free(out_buf);
            return -1;
        }
    }

    g_free(out_buf);
    return 0;
}

static int qcow2_flush(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        return ret;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        return ret;
    }

    return bdrv_flush(bs->file);
}

static BlockDriverAIOCB *qcow2_aio_flush(BlockDriverState *bs,
                                         BlockDriverCompletionFunc *cb,
                                         void *opaque)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        return NULL;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        return NULL;
    }

    return bdrv_aio_flush(bs->file, cb, opaque);
}
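
/*
 * VM state for internal snapshots is stored in the area that begins right
 * after the space addressable by the virtual disk: each L1 entry covers
 * cluster_size << l2_bits bytes, so the first L1 index past the disk size
 * gives the byte offset returned below.
 */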
static int64_t qcow2_vm_state_offset(BDRVQcowState *s)
{
        return (int64_t)s->l1_vm_state_index << (s->cluster_bits + s->l2_bits);
}

static int qcow2_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQcowState *s = bs->opaque;
    bdi->cluster_size = s->cluster_size;
    bdi->vm_state_offset = qcow2_vm_state_offset(s);
    return 0;
}


static int qcow2_check(BlockDriverState *bs, BdrvCheckResult *result)
{
    return qcow2_check_refcounts(bs, result);
}

#if 0
static void dump_refcounts(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    int64_t nb_clusters, k, k1, size;
    int refcount;

    size = bdrv_getlength(bs->file);
    nb_clusters = size_to_clusters(s, size);
    for(k = 0; k < nb_clusters;) {
        k1 = k;
        refcount = get_refcount(bs, k);
        k++;
        while (k < nb_clusters && get_refcount(bs, k) == refcount)
            k++;
        printf("%" PRId64 ": refcount=%d nb=%" PRId64 "\n", k, refcount,
               k - k1);
    }
}
#endif

static int qcow2_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                              int64_t pos, int size)
{
    BDRVQcowState *s = bs->opaque;
    int growable = bs->growable;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE);
    bs->growable = 1;
    ret = bdrv_pwrite(bs, qcow2_vm_state_offset(s) + pos, buf, size);
    bs->growable = growable;

    return ret;
}

static int qcow2_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                              int64_t pos, int size)
{
    BDRVQcowState *s = bs->opaque;
    int growable = bs->growable;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD);
    bs->growable = 1;
    ret = bdrv_pread(bs, qcow2_vm_state_offset(s) + pos, buf, size);
    bs->growable = growable;

    return ret;
}

static QEMUOptionParameter qcow2_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size"
    },
    {
        .name = BLOCK_OPT_BACKING_FILE,
        .type = OPT_STRING,
        .help = "File name of a base image"
    },
    {
        .name = BLOCK_OPT_BACKING_FMT,
        .type = OPT_STRING,
        .help = "Image format of the base image"
    },
    {
        .name = BLOCK_OPT_ENCRYPT,
        .type = OPT_FLAG,
        .help = "Encrypt the image"
    },
    {
        .name = BLOCK_OPT_CLUSTER_SIZE,
        .type = OPT_SIZE,
        .help = "qcow2 cluster size",
        .value = { .n = DEFAULT_CLUSTER_SIZE },
    },
    {
        .name = BLOCK_OPT_PREALLOC,
        .type = OPT_STRING,
        .help = "Preallocation mode (allowed values: off, metadata)"
    },
    { NULL }
};

static BlockDriver bdrv_qcow2 = {
    .format_name        = "qcow2",
    .instance_size      = sizeof(BDRVQcowState),
    .bdrv_probe         = qcow2_probe,
    .bdrv_open          = qcow2_open,
    .bdrv_close         = qcow2_close,
    .bdrv_create        = qcow2_create,
    .bdrv_flush         = qcow2_flush,
    .bdrv_is_allocated  = qcow2_is_allocated,
    .bdrv_set_key       = qcow2_set_key,
    .bdrv_make_empty    = qcow2_make_empty,

    .bdrv_co_readv      = qcow2_co_readv,
    .bdrv_co_writev     = qcow2_co_writev,
    .bdrv_aio_flush     = qcow2_aio_flush,

    .bdrv_discard           = qcow2_discard,
    .bdrv_truncate          = qcow2_truncate,
    .bdrv_write_compressed  = qcow2_write_compressed,

    .bdrv_snapshot_create   = qcow2_snapshot_create,
    .bdrv_snapshot_goto     = qcow2_snapshot_goto,
    .bdrv_snapshot_delete   = qcow2_snapshot_delete,
    .bdrv_snapshot_list     = qcow2_snapshot_list,
    .bdrv_snapshot_load_tmp     = qcow2_snapshot_load_tmp,
    .bdrv_get_info      = qcow2_get_info,

    .bdrv_save_vmstate    = qcow2_save_vmstate,
    .bdrv_load_vmstate    = qcow2_load_vmstate,

    .bdrv_change_backing_file   = qcow2_change_backing_file,

    .create_options = qcow2_create_options,
    .bdrv_check = qcow2_check,
};

static void bdrv_qcow2_init(void)
{
    bdrv_register(&bdrv_qcow2);
}

block_init(bdrv_qcow2_init);