/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu-common.h"
#include "block_int.h"
#include "module.h"
#include <zlib.h>
#include "aes.h"
#include "block/qcow2.h"

/*
  Differences with QCOW:

  - Support for multiple incremental snapshots.
  - Memory management by reference counts.
  - Clusters which have a reference count of one have the bit
    QCOW_OFLAG_COPIED to optimize write performance.
  - Size of compressed clusters is stored in sectors to reduce bit usage
    in the cluster offsets.
  - Support for storing additional data (such as the VM state) in the
    snapshots.
  - If a backing store is used, the cluster size is not constrained
    (could be backported to QCOW).
  - L2 tables always have a size of one cluster.
*/


typedef struct {
    uint32_t magic;
    uint32_t len;
} QCowExtension;
#define  QCOW_EXT_MAGIC_END 0
#define  QCOW_EXT_MAGIC_BACKING_FORMAT 0xE2792ACA

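/* Probe callback: return the maximum score of 100 when the buffer starts
 * with the qcow2 magic and version number, so that this driver is selected
 * for the image; return 0 for anything else. */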
static int qcow_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    const QCowHeader *cow_header = (const void *)buf;

    if (buf_size >= sizeof(QCowHeader) &&
        be32_to_cpu(cow_header->magic) == QCOW_MAGIC &&
        be32_to_cpu(cow_header->version) == QCOW_VERSION)
        return 100;
    else
        return 0;
}

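/*
 * On-disk layout handled below: each header extension is a big-endian
 * 32-bit magic followed by a big-endian 32-bit length and len bytes of
 * data; the next extension starts at the following 8-byte boundary
 * (offset += (len + 7) & ~7).
 */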
/*
 * read qcow2 extension and fill bs
 * start reading from start_offset
 * finish reading upon magic of value 0 or when end_offset reached
 * unknown magic is skipped (future extension this version knows nothing about)
 * return 0 upon success, non-0 otherwise
 */
static int qcow_read_extensions(BlockDriverState *bs, uint64_t start_offset,
                                uint64_t end_offset)
{
    BDRVQcowState *s = bs->opaque;
    QCowExtension ext;
    uint64_t offset;

#ifdef DEBUG_EXT
    printf("qcow_read_extensions: start=%ld end=%ld\n", start_offset, end_offset);
#endif
    offset = start_offset;
    while (offset < end_offset) {

#ifdef DEBUG_EXT
        /* Sanity check */
        if (offset > s->cluster_size)
            printf("qcow_handle_extension: suspicious offset %lu\n", offset);

        printf("attempting to read extended header at offset %lu\n", offset);
#endif

        if (bdrv_pread(s->hd, offset, &ext, sizeof(ext)) != sizeof(ext)) {
            fprintf(stderr, "qcow_handle_extension: ERROR: pread fail from offset %llu\n",
                    (unsigned long long)offset);
            return 1;
        }
        be32_to_cpus(&ext.magic);
        be32_to_cpus(&ext.len);
        offset += sizeof(ext);
#ifdef DEBUG_EXT
        printf("ext.magic = 0x%x\n", ext.magic);
#endif
        switch (ext.magic) {
        case QCOW_EXT_MAGIC_END:
            return 0;

        case QCOW_EXT_MAGIC_BACKING_FORMAT:
            if (ext.len >= sizeof(bs->backing_format)) {
                fprintf(stderr, "ERROR: ext_backing_format: len=%u too large"
                        " (>=%zu)\n",
                        ext.len, sizeof(bs->backing_format));
                return 2;
            }
            if (bdrv_pread(s->hd, offset, bs->backing_format,
                           ext.len) != ext.len)
                return 3;
            bs->backing_format[ext.len] = '\0';
#ifdef DEBUG_EXT
            printf("Qcow2: Got format extension %s\n", bs->backing_format);
#endif
            offset += ((ext.len + 7) & ~7);
            break;

        default:
            /* unknown magic -- just skip it */
            offset += ((ext.len + 7) & ~7);
            break;
        }
    }

    return 0;
}

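/* Open an image: read and byte-swap the header, validate it, fill in the
 * cached geometry in BDRVQcowState, load the L1 table and the refcount
 * structures, parse the header extensions and read the backing file name
 * and the snapshot table. Returns 0 on success, a negative value on error. */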
static int qcow_open(BlockDriverState *bs, const char *filename, int flags)
{
    BDRVQcowState *s = bs->opaque;
    int len, i, shift, ret;
    QCowHeader header;
    uint64_t ext_end;

    ret = bdrv_file_open(&s->hd, filename, flags);
    if (ret < 0)
        return ret;
    if (bdrv_pread(s->hd, 0, &header, sizeof(header)) != sizeof(header))
        goto fail;
    be32_to_cpus(&header.magic);
    be32_to_cpus(&header.version);
    be64_to_cpus(&header.backing_file_offset);
    be32_to_cpus(&header.backing_file_size);
    be64_to_cpus(&header.size);
    be32_to_cpus(&header.cluster_bits);
    be32_to_cpus(&header.crypt_method);
    be64_to_cpus(&header.l1_table_offset);
    be32_to_cpus(&header.l1_size);
    be64_to_cpus(&header.refcount_table_offset);
    be32_to_cpus(&header.refcount_table_clusters);
    be64_to_cpus(&header.snapshots_offset);
    be32_to_cpus(&header.nb_snapshots);

    if (header.magic != QCOW_MAGIC || header.version != QCOW_VERSION)
        goto fail;
    if (header.size <= 1 ||
        header.cluster_bits < MIN_CLUSTER_BITS ||
        header.cluster_bits > MAX_CLUSTER_BITS)
        goto fail;
    if (header.crypt_method > QCOW_CRYPT_AES)
        goto fail;
    s->crypt_method_header = header.crypt_method;
    if (s->crypt_method_header)
        bs->encrypted = 1;
    s->cluster_bits = header.cluster_bits;
    s->cluster_size = 1 << s->cluster_bits;
    s->cluster_sectors = 1 << (s->cluster_bits - 9);
    s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */
    s->l2_size = 1 << s->l2_bits;
    bs->total_sectors = header.size / 512;
    s->csize_shift = (62 - (s->cluster_bits - 8));
    s->csize_mask = (1 << (s->cluster_bits - 8)) - 1;
    s->cluster_offset_mask = (1LL << s->csize_shift) - 1;
    s->refcount_table_offset = header.refcount_table_offset;
    s->refcount_table_size =
        header.refcount_table_clusters << (s->cluster_bits - 3);

    s->snapshots_offset = header.snapshots_offset;
    s->nb_snapshots = header.nb_snapshots;

    /* read the level 1 table */
    s->l1_size = header.l1_size;
    shift = s->cluster_bits + s->l2_bits;
    s->l1_vm_state_index = (header.size + (1LL << shift) - 1) >> shift;
    /* the L1 table must contain at least enough entries to put
       header.size bytes */
    if (s->l1_size < s->l1_vm_state_index)
        goto fail;
    s->l1_table_offset = header.l1_table_offset;
    s->l1_table = qemu_malloc(s->l1_size * sizeof(uint64_t));
    if (bdrv_pread(s->hd, s->l1_table_offset, s->l1_table, s->l1_size * sizeof(uint64_t)) !=
        s->l1_size * sizeof(uint64_t))
        goto fail;
    for(i = 0;i < s->l1_size; i++) {
        be64_to_cpus(&s->l1_table[i]);
    }
    /* alloc L2 cache */
    s->l2_cache = qemu_malloc(s->l2_size * L2_CACHE_SIZE * sizeof(uint64_t));
    s->cluster_cache = qemu_malloc(s->cluster_size);
    /* one more sector for decompressed data alignment */
    s->cluster_data = qemu_malloc(QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size
                                  + 512);
    s->cluster_cache_offset = -1;

    if (qcow2_refcount_init(bs) < 0)
        goto fail;

    /* read qcow2 extensions */
    if (header.backing_file_offset)
        ext_end = header.backing_file_offset;
    else
        ext_end = s->cluster_size;
    if (qcow_read_extensions(bs, sizeof(header), ext_end))
        goto fail;

    /* read the backing file name */
    if (header.backing_file_offset != 0) {
        len = header.backing_file_size;
        if (len > 1023)
            len = 1023;
        if (bdrv_pread(s->hd, header.backing_file_offset, bs->backing_file, len) != len)
            goto fail;
        bs->backing_file[len] = '\0';
    }
    if (qcow2_read_snapshots(bs) < 0)
        goto fail;

#ifdef DEBUG_ALLOC
    qcow2_check_refcounts(bs);
#endif
    return 0;

 fail:
    qcow2_free_snapshots(bs);
    qcow2_refcount_close(bs);
    qemu_free(s->l1_table);
    qemu_free(s->l2_cache);
    qemu_free(s->cluster_cache);
    qemu_free(s->cluster_data);
    bdrv_delete(s->hd);
    return -1;
}

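/* Derive the AES encryption and decryption keys from the user supplied
 * password: the first 16 bytes of the password are used directly as a
 * 128 bit key, zero-padded if the password is shorter. */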
static int qcow_set_key(BlockDriverState *bs, const char *key)
{
    BDRVQcowState *s = bs->opaque;
    uint8_t keybuf[16];
    int len, i;

    memset(keybuf, 0, 16);
    len = strlen(key);
    if (len > 16)
        len = 16;
    /* XXX: we could compress the chars to 7 bits to increase
       entropy */
    for(i = 0;i < len;i++) {
        keybuf[i] = key[i];
    }
    s->crypt_method = s->crypt_method_header;

    if (AES_set_encrypt_key(keybuf, 128, &s->aes_encrypt_key) != 0)
        return -1;
    if (AES_set_decrypt_key(keybuf, 128, &s->aes_decrypt_key) != 0)
        return -1;
#if 0
    /* test */
    {
        uint8_t in[16];
        uint8_t out[16];
        uint8_t tmp[16];
        for(i=0;i<16;i++)
            in[i] = i;
        AES_encrypt(in, tmp, &s->aes_encrypt_key);
        AES_decrypt(tmp, out, &s->aes_decrypt_key);
        for(i = 0; i < 16; i++)
            printf(" %02x", tmp[i]);
        printf("\n");
        for(i = 0; i < 16; i++)
            printf(" %02x", out[i]);
        printf("\n");
    }
#endif
    return 0;
}

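/* Report whether the sectors starting at sector_num are allocated in this
 * image; *pnum is set to the number of contiguous sectors for which the
 * answer is the same. */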
static int qcow_is_allocated(BlockDriverState *bs, int64_t sector_num,
                             int nb_sectors, int *pnum)
{
    uint64_t cluster_offset;

    *pnum = nb_sectors;
    cluster_offset = qcow2_get_cluster_offset(bs, sector_num << 9, pnum);

    return (cluster_offset != 0);
}

/* handle reading after the end of the backing file */
int qcow2_backing_read1(BlockDriverState *bs,
                  int64_t sector_num, uint8_t *buf, int nb_sectors)
{
    int n1;
    if ((sector_num + nb_sectors) <= bs->total_sectors)
        return nb_sectors;
    if (sector_num >= bs->total_sectors)
        n1 = 0;
    else
        n1 = bs->total_sectors - sector_num;
    memset(buf + n1 * 512, 0, 512 * (nb_sectors - n1));
    return n1;
}

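/* Per-request state for the AIO read/write state machines below: current
 * position, the bounce buffer used when the caller's iovec has more than
 * one element, the cluster mapping of the piece in flight and the pending
 * host AIO request. */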
typedef struct QCowAIOCB {
    BlockDriverAIOCB common;
    int64_t sector_num;
    QEMUIOVector *qiov;
    uint8_t *buf;
    void *orig_buf;
    int nb_sectors;
    int n;
    uint64_t cluster_offset;
    uint8_t *cluster_data;
    BlockDriverAIOCB *hd_aiocb;
    struct iovec hd_iov;
    QEMUIOVector hd_qiov;
    QEMUBH *bh;
    QCowL2Meta l2meta;
} QCowAIOCB;

static void qcow_aio_cancel(BlockDriverAIOCB *blockacb)
{
    QCowAIOCB *acb = (QCowAIOCB *)blockacb;
    if (acb->hd_aiocb)
        bdrv_aio_cancel(acb->hd_aiocb);
    qemu_aio_release(acb);
}

static AIOPool qcow_aio_pool = {
    .aiocb_size         = sizeof(QCowAIOCB),
    .cancel             = qcow_aio_cancel,
};

static void qcow_aio_read_cb(void *opaque, int ret);
static void qcow_aio_read_bh(void *opaque)
{
    QCowAIOCB *acb = opaque;
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qcow_aio_read_cb(opaque, 0);
}

static int qcow_schedule_bh(QEMUBHFunc *cb, QCowAIOCB *acb)
{
    if (acb->bh)
        return -EIO;

    acb->bh = qemu_bh_new(cb, acb);
    if (!acb->bh)
        return -EIO;

    qemu_bh_schedule(acb->bh);

    return 0;
}

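/* Read state machine, called once to start the request and then again after
 * each completed piece: post-process (decrypt) the data just read, then map
 * the next run of guest sectors and either read from the backing file, fill
 * zeroes, copy from the decompressed cluster cache or issue the next host
 * read. Once nb_sectors reaches zero the caller's callback is invoked. */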
static void qcow_aio_read_cb(void *opaque, int ret)
{
    QCowAIOCB *acb = opaque;
    BlockDriverState *bs = acb->common.bs;
    BDRVQcowState *s = bs->opaque;
    int index_in_cluster, n1;

    acb->hd_aiocb = NULL;
    if (ret < 0)
        goto done;

    /* post process the read buffer */
    if (!acb->cluster_offset) {
        /* nothing to do */
    } else if (acb->cluster_offset & QCOW_OFLAG_COMPRESSED) {
        /* nothing to do */
    } else {
        if (s->crypt_method) {
            qcow2_encrypt_sectors(s, acb->sector_num, acb->buf, acb->buf,
                            acb->n, 0,
                            &s->aes_decrypt_key);
        }
    }

    acb->nb_sectors -= acb->n;
    acb->sector_num += acb->n;
    acb->buf += acb->n * 512;

    if (acb->nb_sectors == 0) {
        /* request completed */
        ret = 0;
        goto done;
    }

    /* prepare next AIO request */
    acb->n = acb->nb_sectors;
    acb->cluster_offset =
        qcow2_get_cluster_offset(bs, acb->sector_num << 9, &acb->n);
    index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);

    if (!acb->cluster_offset) {
        if (bs->backing_hd) {
            /* read from the base image */
            n1 = qcow2_backing_read1(bs->backing_hd, acb->sector_num,
                               acb->buf, acb->n);
            if (n1 > 0) {
                acb->hd_iov.iov_base = (void *)acb->buf;
                acb->hd_iov.iov_len = acb->n * 512;
                qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1);
                acb->hd_aiocb = bdrv_aio_readv(bs->backing_hd, acb->sector_num,
                                    &acb->hd_qiov, acb->n,
                                    qcow_aio_read_cb, acb);
                if (acb->hd_aiocb == NULL)
                    goto done;
            } else {
                ret = qcow_schedule_bh(qcow_aio_read_bh, acb);
                if (ret < 0)
                    goto done;
            }
        } else {
            /* Note: in this case, no need to wait */
            memset(acb->buf, 0, 512 * acb->n);
            ret = qcow_schedule_bh(qcow_aio_read_bh, acb);
            if (ret < 0)
                goto done;
        }
    } else if (acb->cluster_offset & QCOW_OFLAG_COMPRESSED) {
        /* add AIO support for compressed blocks ? */
        if (qcow2_decompress_cluster(s, acb->cluster_offset) < 0)
            goto done;
        memcpy(acb->buf,
               s->cluster_cache + index_in_cluster * 512, 512 * acb->n);
        ret = qcow_schedule_bh(qcow_aio_read_bh, acb);
        if (ret < 0)
            goto done;
    } else {
        if ((acb->cluster_offset & 511) != 0) {
            ret = -EIO;
            goto done;
        }

        acb->hd_iov.iov_base = (void *)acb->buf;
        acb->hd_iov.iov_len = acb->n * 512;
        qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1);
        acb->hd_aiocb = bdrv_aio_readv(s->hd,
                            (acb->cluster_offset >> 9) + index_in_cluster,
                            &acb->hd_qiov, acb->n, qcow_aio_read_cb, acb);
        if (acb->hd_aiocb == NULL)
            goto done;
    }

    return;
done:
    if (acb->qiov->niov > 1) {
        qemu_iovec_from_buffer(acb->qiov, acb->orig_buf, acb->qiov->size);
        qemu_vfree(acb->orig_buf);
    }
    acb->common.cb(acb->common.opaque, ret);
    qemu_aio_release(acb);
}

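/* Allocate and initialize the QCowAIOCB for a request; a multi-element
 * iovec is linearized into a bounce buffer (filled from the iovec when
 * setting up a write). */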
static QCowAIOCB *qcow_aio_setup(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque, int is_write)
{
    QCowAIOCB *acb;

    acb = qemu_aio_get(&qcow_aio_pool, bs, cb, opaque);
    if (!acb)
        return NULL;
    acb->hd_aiocb = NULL;
    acb->sector_num = sector_num;
    acb->qiov = qiov;
    if (qiov->niov > 1) {
        acb->buf = acb->orig_buf = qemu_blockalign(bs, qiov->size);
        if (is_write)
            qemu_iovec_to_buffer(qiov, acb->buf);
    } else {
        acb->buf = (uint8_t *)qiov->iov->iov_base;
    }
    acb->nb_sectors = nb_sectors;
    acb->n = 0;
    acb->cluster_offset = 0;
    acb->l2meta.nb_clusters = 0;
    return acb;
}

static BlockDriverAIOCB *qcow_aio_readv(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    QCowAIOCB *acb;

    acb = qcow_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
    if (!acb)
        return NULL;

    qcow_aio_read_cb(acb, 0);
    return &acb->common;
}

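/* Write state machine, called once to start the request and then again after
 * each completed piece: link the clusters allocated for the previous piece
 * into the L2 table, then allocate clusters for the next run of guest
 * sectors, encrypt the data if needed and issue the next host write. Once
 * nb_sectors reaches zero the caller's callback is invoked. */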
static void qcow_aio_write_cb(void *opaque, int ret)
{
    QCowAIOCB *acb = opaque;
    BlockDriverState *bs = acb->common.bs;
    BDRVQcowState *s = bs->opaque;
    int index_in_cluster;
    const uint8_t *src_buf;
    int n_end;

    acb->hd_aiocb = NULL;

    if (ret < 0)
        goto done;

    if (qcow2_alloc_cluster_link_l2(bs, acb->cluster_offset, &acb->l2meta) < 0) {
        qcow2_free_any_clusters(bs, acb->cluster_offset, acb->l2meta.nb_clusters);
        goto done;
    }

    acb->nb_sectors -= acb->n;
    acb->sector_num += acb->n;
    acb->buf += acb->n * 512;

    if (acb->nb_sectors == 0) {
        /* request completed */
        ret = 0;
        goto done;
    }

    index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
    n_end = index_in_cluster + acb->nb_sectors;
    if (s->crypt_method &&
        n_end > QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors)
        n_end = QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors;

    acb->cluster_offset = qcow2_alloc_cluster_offset(bs, acb->sector_num << 9,
                                          index_in_cluster,
                                          n_end, &acb->n, &acb->l2meta);
    if (!acb->cluster_offset || (acb->cluster_offset & 511) != 0) {
        ret = -EIO;
        goto done;
    }
    if (s->crypt_method) {
        if (!acb->cluster_data) {
            acb->cluster_data = qemu_mallocz(QCOW_MAX_CRYPT_CLUSTERS *
                                             s->cluster_size);
        }
        qcow2_encrypt_sectors(s, acb->sector_num, acb->cluster_data, acb->buf,
                        acb->n, 1, &s->aes_encrypt_key);
        src_buf = acb->cluster_data;
    } else {
        src_buf = acb->buf;
    }
    acb->hd_iov.iov_base = (void *)src_buf;
    acb->hd_iov.iov_len = acb->n * 512;
    qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1);
    acb->hd_aiocb = bdrv_aio_writev(s->hd,
                                    (acb->cluster_offset >> 9) + index_in_cluster,
                                    &acb->hd_qiov, acb->n,
                                    qcow_aio_write_cb, acb);
    if (acb->hd_aiocb == NULL)
        goto done;

    return;

done:
    if (acb->qiov->niov > 1)
        qemu_vfree(acb->orig_buf);
    acb->common.cb(acb->common.opaque, ret);
    qemu_aio_release(acb);
}

static BlockDriverAIOCB *qcow_aio_writev(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    BDRVQcowState *s = bs->opaque;
    QCowAIOCB *acb;

    s->cluster_cache_offset = -1; /* disable compressed cache */

    acb = qcow_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
    if (!acb)
        return NULL;

    qcow_aio_write_cb(acb, 0);
    return &acb->common;
}

static void qcow_close(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    qemu_free(s->l1_table);
    qemu_free(s->l2_cache);
    qemu_free(s->cluster_cache);
    qemu_free(s->cluster_data);
    qcow2_refcount_close(bs);
    bdrv_delete(s->hd);
}

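/* Return log2(size) if size is a power of two, -1 otherwise.
 * For example get_bits_from_size(65536) == 16 and
 * get_bits_from_size(65535) == -1. */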
static int get_bits_from_size(size_t size)
{
    int res = 0;

    if (size == 0) {
        return -1;
    }

    while (size != 1) {
        /* Not a power of two */
        if (size & 1) {
            return -1;
        }

        size >>= 1;
        res++;
    }

    return res;
}

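/* Write out a new image: the header (followed by the optional backing format
 * extension and the backing file name), an all-zero L1 table, a one cluster
 * refcount table and enough refcount blocks to cover this metadata, with the
 * tables aligned to the cluster size. */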
static int qcow_create2(const char *filename, int64_t total_size,
                        const char *backing_file, const char *backing_format,
                        int flags, size_t cluster_size)
{

    int fd, header_size, backing_filename_len, l1_size, i, shift, l2_bits;
    int ref_clusters, backing_format_len = 0;
    QCowHeader header;
    uint64_t tmp, offset;
    QCowCreateState s1, *s = &s1;
    QCowExtension ext_bf = {0, 0};

    memset(s, 0, sizeof(*s));

    fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, 0644);
    if (fd < 0)
        return -1;
    memset(&header, 0, sizeof(header));
    header.magic = cpu_to_be32(QCOW_MAGIC);
    header.version = cpu_to_be32(QCOW_VERSION);
    header.size = cpu_to_be64(total_size * 512);
    header_size = sizeof(header);
    backing_filename_len = 0;
    if (backing_file) {
        if (backing_format) {
            ext_bf.magic = QCOW_EXT_MAGIC_BACKING_FORMAT;
            backing_format_len = strlen(backing_format);
            ext_bf.len = (backing_format_len + 7) & ~7;
            header_size += ((sizeof(ext_bf) + ext_bf.len + 7) & ~7);
        }
        header.backing_file_offset = cpu_to_be64(header_size);
        backing_filename_len = strlen(backing_file);
        header.backing_file_size = cpu_to_be32(backing_filename_len);
        header_size += backing_filename_len;
    }

    /* Cluster size */
    s->cluster_bits = get_bits_from_size(cluster_size);
    if (s->cluster_bits < MIN_CLUSTER_BITS ||
        s->cluster_bits > MAX_CLUSTER_BITS)
    {
        fprintf(stderr, "Cluster size must be a power of two between "
            "%d and %dk\n",
            1 << MIN_CLUSTER_BITS,
            1 << (MAX_CLUSTER_BITS - 10));
        close(fd);
        return -EINVAL;
    }
    s->cluster_size = 1 << s->cluster_bits;

    header.cluster_bits = cpu_to_be32(s->cluster_bits);
    header_size = (header_size + 7) & ~7;
    if (flags & BLOCK_FLAG_ENCRYPT) {
        header.crypt_method = cpu_to_be32(QCOW_CRYPT_AES);
    } else {
        header.crypt_method = cpu_to_be32(QCOW_CRYPT_NONE);
    }
    l2_bits = s->cluster_bits - 3;
    shift = s->cluster_bits + l2_bits;
    l1_size = (((total_size * 512) + (1LL << shift) - 1) >> shift);
    offset = align_offset(header_size, s->cluster_size);
    s->l1_table_offset = offset;
    header.l1_table_offset = cpu_to_be64(s->l1_table_offset);
    header.l1_size = cpu_to_be32(l1_size);
    offset += align_offset(l1_size * sizeof(uint64_t), s->cluster_size);

    s->refcount_table = qemu_mallocz(s->cluster_size);

    s->refcount_table_offset = offset;
    header.refcount_table_offset = cpu_to_be64(offset);
    header.refcount_table_clusters = cpu_to_be32(1);
    offset += s->cluster_size;
    s->refcount_block_offset = offset;

    /* count how many refcount blocks needed */
    tmp = offset >> s->cluster_bits;
    ref_clusters = (tmp >> (s->cluster_bits - REFCOUNT_SHIFT)) + 1;
    for (i=0; i < ref_clusters; i++) {
        s->refcount_table[i] = cpu_to_be64(offset);
        offset += s->cluster_size;
    }

    s->refcount_block = qemu_mallocz(ref_clusters * s->cluster_size);

    /* update refcounts */
    qcow2_create_refcount_update(s, 0, header_size);
    qcow2_create_refcount_update(s, s->l1_table_offset,
        l1_size * sizeof(uint64_t));
    qcow2_create_refcount_update(s, s->refcount_table_offset, s->cluster_size);
    qcow2_create_refcount_update(s, s->refcount_block_offset,
        ref_clusters * s->cluster_size);

    /* write all the data */
    write(fd, &header, sizeof(header));
    if (backing_file) {
        if (backing_format_len) {
            char zero[16];
            int d = ext_bf.len - backing_format_len;

            memset(zero, 0, sizeof(zero));
            cpu_to_be32s(&ext_bf.magic);
            cpu_to_be32s(&ext_bf.len);
            write(fd, &ext_bf, sizeof(ext_bf));
            write(fd, backing_format, backing_format_len);
            if (d > 0) {
                write(fd, zero, d);
            }
        }
        write(fd, backing_file, backing_filename_len);
    }
    lseek(fd, s->l1_table_offset, SEEK_SET);
    tmp = 0;
    for(i = 0;i < l1_size; i++) {
        write(fd, &tmp, sizeof(tmp));
    }
    lseek(fd, s->refcount_table_offset, SEEK_SET);
    write(fd, s->refcount_table, s->cluster_size);

    lseek(fd, s->refcount_block_offset, SEEK_SET);
    write(fd, s->refcount_block, ref_clusters * s->cluster_size);

    qemu_free(s->refcount_table);
    qemu_free(s->refcount_block);
    close(fd);
    return 0;
}

static int qcow_create(const char *filename, QEMUOptionParameter *options)
{
    const char *backing_file = NULL;
    const char *backing_fmt = NULL;
    uint64_t sectors = 0;
    int flags = 0;
    size_t cluster_size = 65536;

    /* Read out options */
    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            sectors = options->value.n / 512;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FILE)) {
            backing_file = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FMT)) {
            backing_fmt = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_ENCRYPT)) {
            flags |= options->value.n ? BLOCK_FLAG_ENCRYPT : 0;
        } else if (!strcmp(options->name, BLOCK_OPT_CLUSTER_SIZE)) {
            if (options->value.n) {
                cluster_size = options->value.n;
            }
        }
        options++;
    }

    return qcow_create2(filename, sectors, backing_file, backing_fmt, flags,
        cluster_size);
}

static int qcow_make_empty(BlockDriverState *bs)
{
#if 0
    /* XXX: not correct */
    BDRVQcowState *s = bs->opaque;
    uint32_t l1_length = s->l1_size * sizeof(uint64_t);
    int ret;

    memset(s->l1_table, 0, l1_length);
    if (bdrv_pwrite(s->hd, s->l1_table_offset, s->l1_table, l1_length) < 0)
        return -1;
    ret = bdrv_truncate(s->hd, s->l1_table_offset + l1_length);
    if (ret < 0)
        return ret;

    l2_cache_reset(bs);
#endif
    return 0;
}

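/* Compress exactly one cluster with zlib (raw deflate, no header) and store
 * it as a compressed cluster; if the data does not shrink, it is written as
 * a normal uncompressed cluster instead. A call with nb_sectors == 0 only
 * pads the file to a sector boundary. */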
/* XXX: put compressed sectors first, then all the cluster aligned
   tables to avoid losing bytes in alignment */
static int qcow_write_compressed(BlockDriverState *bs, int64_t sector_num,
                                 const uint8_t *buf, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    z_stream strm;
    int ret, out_len;
    uint8_t *out_buf;
    uint64_t cluster_offset;

    if (nb_sectors == 0) {
        /* align end of file to a sector boundary to ease reading with
           sector based I/Os */
        cluster_offset = bdrv_getlength(s->hd);
        cluster_offset = (cluster_offset + 511) & ~511;
        bdrv_truncate(s->hd, cluster_offset);
        return 0;
    }

    if (nb_sectors != s->cluster_sectors)
        return -EINVAL;

    out_buf = qemu_malloc(s->cluster_size + (s->cluster_size / 1000) + 128);

    /* best compression, small window, no zlib header */
    memset(&strm, 0, sizeof(strm));
    ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION,
                       Z_DEFLATED, -12,
                       9, Z_DEFAULT_STRATEGY);
    if (ret != 0) {
        qemu_free(out_buf);
        return -1;
    }

    strm.avail_in = s->cluster_size;
    strm.next_in = (uint8_t *)buf;
    strm.avail_out = s->cluster_size;
    strm.next_out = out_buf;

    ret = deflate(&strm, Z_FINISH);
    if (ret != Z_STREAM_END && ret != Z_OK) {
        qemu_free(out_buf);
        deflateEnd(&strm);
        return -1;
    }
    out_len = strm.next_out - out_buf;

    deflateEnd(&strm);

    if (ret != Z_STREAM_END || out_len >= s->cluster_size) {
        /* could not compress: write normal cluster */
        bdrv_write(bs, sector_num, buf, s->cluster_sectors);
    } else {
        cluster_offset = qcow2_alloc_compressed_cluster_offset(bs,
            sector_num << 9, out_len);
        if (!cluster_offset) {
            qemu_free(out_buf);
            return -1;
        }
        cluster_offset &= s->cluster_offset_mask;
        if (bdrv_pwrite(s->hd, cluster_offset, out_buf, out_len) != out_len) {
            qemu_free(out_buf);
            return -1;
        }
    }

    qemu_free(out_buf);
    return 0;
}

static void qcow_flush(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    bdrv_flush(s->hd);
}

static int qcow_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQcowState *s = bs->opaque;
    bdi->cluster_size = s->cluster_size;
    bdi->vm_state_offset = (int64_t)s->l1_vm_state_index <<
        (s->cluster_bits + s->l2_bits);
    return 0;
}


static int qcow_check(BlockDriverState *bs)
{
    return qcow2_check_refcounts(bs);
}

#if 0
static void dump_refcounts(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    int64_t nb_clusters, k, k1, size;
    int refcount;

    size = bdrv_getlength(s->hd);
    nb_clusters = size_to_clusters(s, size);
    for(k = 0; k < nb_clusters;) {
        k1 = k;
        refcount = get_refcount(bs, k);
        k++;
        while (k < nb_clusters && get_refcount(bs, k) == refcount)
            k++;
        printf("%lld: refcount=%d nb=%lld\n", k, refcount, k - k1);
    }
}
#endif

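/* VM state accessors (bdrv_put_buffer/bdrv_get_buffer): the snapshot code
 * stores the VM state past the end of the virtual disk (see vm_state_offset
 * in qcow_get_info), so the device is temporarily marked growable to allow
 * access beyond the nominal image size. */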
static int qcow_put_buffer(BlockDriverState *bs, const uint8_t *buf,
                           int64_t pos, int size)
{
    int growable = bs->growable;

    bs->growable = 1;
    bdrv_pwrite(bs, pos, buf, size);
    bs->growable = growable;

    return size;
}

static int qcow_get_buffer(BlockDriverState *bs, uint8_t *buf,
                           int64_t pos, int size)
{
    int growable = bs->growable;
    int ret;

    bs->growable = 1;
    ret = bdrv_pread(bs, pos, buf, size);
    bs->growable = growable;

    return ret;
}

static QEMUOptionParameter qcow_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size"
    },
    {
        .name = BLOCK_OPT_BACKING_FILE,
        .type = OPT_STRING,
        .help = "File name of a base image"
    },
    {
        .name = BLOCK_OPT_BACKING_FMT,
        .type = OPT_STRING,
        .help = "Image format of the base image"
    },
    {
        .name = BLOCK_OPT_ENCRYPT,
        .type = OPT_FLAG,
        .help = "Encrypt the image"
    },
    {
        .name = BLOCK_OPT_CLUSTER_SIZE,
        .type = OPT_SIZE,
        .help = "qcow2 cluster size"
    },
    { NULL }
};

static BlockDriver bdrv_qcow2 = {
    .format_name            = "qcow2",
    .instance_size          = sizeof(BDRVQcowState),
    .bdrv_probe             = qcow_probe,
    .bdrv_open              = qcow_open,
    .bdrv_close             = qcow_close,
    .bdrv_create            = qcow_create,
    .bdrv_flush             = qcow_flush,
    .bdrv_is_allocated      = qcow_is_allocated,
    .bdrv_set_key           = qcow_set_key,
    .bdrv_make_empty        = qcow_make_empty,

    .bdrv_aio_readv         = qcow_aio_readv,
    .bdrv_aio_writev        = qcow_aio_writev,
    .bdrv_write_compressed  = qcow_write_compressed,

    .bdrv_snapshot_create   = qcow2_snapshot_create,
    .bdrv_snapshot_goto     = qcow2_snapshot_goto,
    .bdrv_snapshot_delete   = qcow2_snapshot_delete,
    .bdrv_snapshot_list     = qcow2_snapshot_list,
    .bdrv_get_info          = qcow_get_info,

    .bdrv_put_buffer        = qcow_put_buffer,
    .bdrv_get_buffer        = qcow_get_buffer,

    .create_options = qcow_create_options,
    .bdrv_check = qcow_check,
};

static void bdrv_qcow2_init(void)
{
    bdrv_register(&bdrv_qcow2);
}

block_init(bdrv_qcow2_init);