block/qcow2.c @ cc84d90f
/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu-common.h"
#include "block/block_int.h"
#include "qemu/module.h"
#include <zlib.h>
#include "qemu/aes.h"
#include "block/qcow2.h"
#include "qemu/error-report.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qmp/qbool.h"
#include "trace.h"

/*
  Differences with QCOW:

  - Support for multiple incremental snapshots.
  - Memory management by reference counts.
  - Clusters which have a reference count of one have the bit
    QCOW_OFLAG_COPIED to optimize write performance.
  - The size of compressed clusters is stored in sectors to reduce bit usage
    in the cluster offsets.
  - Support for storing additional data (such as the VM state) in the
    snapshots.
  - If a backing store is used, the cluster size is not constrained
    (could be backported to QCOW).
  - L2 tables always have a size of one cluster.
*/

typedef struct {
    uint32_t magic;
    uint32_t len;
} QCowExtension;

#define  QCOW2_EXT_MAGIC_END 0
#define  QCOW2_EXT_MAGIC_BACKING_FORMAT 0xE2792ACA
#define  QCOW2_EXT_MAGIC_FEATURE_TABLE 0x6803f857
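
/*
 * On-disk layout of a header extension, as implied by QCowExtension and the
 * parsing loop in qcow2_read_extensions() below (descriptive summary only):
 *
 *   Byte 0 -  3:  extension magic (big endian)
 *        4 -  7:  length of the extension data in bytes (big endian)
 *        8 -  n:  extension data
 *
 * The next extension starts at the following 8-byte boundary, i.e. the parser
 * advances by sizeof(QCowExtension) + ((len + 7) & ~7) bytes. Extensions are
 * read from header_length up to the backing file name offset (or the end of
 * the first cluster if there is no backing file); a magic of 0 ends the list.
 */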

static int qcow2_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    const QCowHeader *cow_header = (const void *)buf;

    if (buf_size >= sizeof(QCowHeader) &&
        be32_to_cpu(cow_header->magic) == QCOW_MAGIC &&
        be32_to_cpu(cow_header->version) >= 2)
        return 100;
    else
        return 0;
}
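
/*
 * The probe above returns a confidence score: 100 for a buffer that starts
 * with the QCOW magic and a version of at least 2, 0 otherwise. (In QEMU's
 * format probing, the driver reporting the highest score is selected; that
 * selection logic lives in the generic block layer, not in this file.)
 */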

/*
 * Read qcow2 extensions and fill bs.
 * Start reading from start_offset;
 * finish reading upon a magic of value 0 or when end_offset is reached.
 * Unknown magics are skipped (future extensions this version knows nothing about).
 * Return 0 upon success, non-0 otherwise.
 */
static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
                                 uint64_t end_offset, void **p_feature_table)
{
    BDRVQcowState *s = bs->opaque;
    QCowExtension ext;
    uint64_t offset;
    int ret;

#ifdef DEBUG_EXT
    printf("qcow2_read_extensions: start=%ld end=%ld\n", start_offset, end_offset);
#endif
    offset = start_offset;
    while (offset < end_offset) {

#ifdef DEBUG_EXT
        /* Sanity check */
        if (offset > s->cluster_size)
            printf("qcow2_read_extension: suspicious offset %lu\n", offset);

        printf("attempting to read extended header in offset %lu\n", offset);
#endif

        if (bdrv_pread(bs->file, offset, &ext, sizeof(ext)) != sizeof(ext)) {
            fprintf(stderr, "qcow2_read_extension: ERROR: "
                    "pread fail from offset %" PRIu64 "\n",
                    offset);
            return 1;
        }
        be32_to_cpus(&ext.magic);
        be32_to_cpus(&ext.len);
        offset += sizeof(ext);
#ifdef DEBUG_EXT
        printf("ext.magic = 0x%x\n", ext.magic);
#endif
        if (ext.len > end_offset - offset) {
            error_report("Header extension too large");
            return -EINVAL;
        }

        switch (ext.magic) {
        case QCOW2_EXT_MAGIC_END:
            return 0;

        case QCOW2_EXT_MAGIC_BACKING_FORMAT:
            if (ext.len >= sizeof(bs->backing_format)) {
                fprintf(stderr, "ERROR: ext_backing_format: len=%u too large"
                        " (>=%zu)\n",
                        ext.len, sizeof(bs->backing_format));
                return 2;
            }
            if (bdrv_pread(bs->file, offset, bs->backing_format,
                           ext.len) != ext.len)
                return 3;
            bs->backing_format[ext.len] = '\0';
#ifdef DEBUG_EXT
            printf("Qcow2: Got format extension %s\n", bs->backing_format);
#endif
            break;

        case QCOW2_EXT_MAGIC_FEATURE_TABLE:
            if (p_feature_table != NULL) {
                void* feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature));
                ret = bdrv_pread(bs->file, offset, feature_table, ext.len);
                if (ret < 0) {
                    return ret;
                }

                *p_feature_table = feature_table;
            }
            break;

        default:
            /* unknown magic - save it in case we need to rewrite the header */
            {
                Qcow2UnknownHeaderExtension *uext;

                uext = g_malloc0(sizeof(*uext) + ext.len);
                uext->magic = ext.magic;
                uext->len = ext.len;
                QLIST_INSERT_HEAD(&s->unknown_header_ext, uext, next);

                ret = bdrv_pread(bs->file, offset, uext->data, uext->len);
                if (ret < 0) {
                    return ret;
                }
            }
            break;
        }

        offset += ((ext.len + 7) & ~7);
    }

    return 0;
}

static void cleanup_unknown_header_ext(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    Qcow2UnknownHeaderExtension *uext, *next;

    QLIST_FOREACH_SAFE(uext, &s->unknown_header_ext, next, next) {
        QLIST_REMOVE(uext, next);
        g_free(uext);
    }
}

static void GCC_FMT_ATTR(2, 3) report_unsupported(BlockDriverState *bs,
    const char *fmt, ...)
{
    char msg[64];
    va_list ap;

    va_start(ap, fmt);
    vsnprintf(msg, sizeof(msg), fmt, ap);
    va_end(ap);

    qerror_report(QERR_UNKNOWN_BLOCK_FORMAT_FEATURE,
        bs->device_name, "qcow2", msg);
}

static void report_unsupported_feature(BlockDriverState *bs,
    Qcow2Feature *table, uint64_t mask)
{
    while (table && table->name[0] != '\0') {
        if (table->type == QCOW2_FEAT_TYPE_INCOMPATIBLE) {
            if (mask & (1 << table->bit)) {
                report_unsupported(bs, "%.46s", table->name);
                mask &= ~(1 << table->bit);
            }
        }
        table++;
    }

    if (mask) {
        report_unsupported(bs, "Unknown incompatible feature: %" PRIx64, mask);
    }
}

/*
 * Sets the dirty bit and flushes afterwards if necessary.
 *
 * The incompatible_features bit is only set if the image file header was
 * updated successfully.  Therefore it is not required to check the return
 * value of this function.
 */
int qcow2_mark_dirty(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t val;
    int ret;

    assert(s->qcow_version >= 3);

    if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
        return 0; /* already dirty */
    }

    val = cpu_to_be64(s->incompatible_features | QCOW2_INCOMPAT_DIRTY);
    ret = bdrv_pwrite(bs->file, offsetof(QCowHeader, incompatible_features),
                      &val, sizeof(val));
    if (ret < 0) {
        return ret;
    }
    ret = bdrv_flush(bs->file);
    if (ret < 0) {
        return ret;
    }

    /* Only treat image as dirty if the header was updated successfully */
    s->incompatible_features |= QCOW2_INCOMPAT_DIRTY;
    return 0;
}
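
/*
 * A descriptive summary of how the dirty bit ties the code in this file
 * together: qcow2_open() runs qcow2_check() with BDRV_FIX_ERRORS when it
 * finds QCOW2_INCOMPAT_DIRTY set on a writable image, and qcow2_close()
 * calls qcow2_mark_clean() so that a cleanly closed image never stays
 * flagged. Writers that postpone refcount updates (lazy refcounts) are
 * expected to call qcow2_mark_dirty() first, so a crash leaves the dirty bit
 * set and the refcounts get rebuilt on the next open.
 */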

/*
 * Clears the dirty bit and flushes before if necessary.  Only call this
 * function when there are no pending requests; it does not guard against
 * concurrent requests dirtying the image.
 */
static int qcow2_mark_clean(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;

    if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
        int ret = bdrv_flush(bs);
        if (ret < 0) {
            return ret;
        }

        s->incompatible_features &= ~QCOW2_INCOMPAT_DIRTY;
        return qcow2_update_header(bs);
    }
    return 0;
}

/*
 * Marks the image as corrupt.
 */
int qcow2_mark_corrupt(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;

    s->incompatible_features |= QCOW2_INCOMPAT_CORRUPT;
    return qcow2_update_header(bs);
}

/*
 * Marks the image as consistent, i.e., unsets the corrupt bit, and flushes
 * before if necessary.
 */
int qcow2_mark_consistent(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;

    if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) {
        int ret = bdrv_flush(bs);
        if (ret < 0) {
            return ret;
        }

        s->incompatible_features &= ~QCOW2_INCOMPAT_CORRUPT;
        return qcow2_update_header(bs);
    }
    return 0;
}

static int qcow2_check(BlockDriverState *bs, BdrvCheckResult *result,
                       BdrvCheckMode fix)
{
    int ret = qcow2_check_refcounts(bs, result, fix);
    if (ret < 0) {
        return ret;
    }

    if (fix && result->check_errors == 0 && result->corruptions == 0) {
        ret = qcow2_mark_clean(bs);
        if (ret < 0) {
            return ret;
        }
        return qcow2_mark_consistent(bs);
    }
    return ret;
}

static QemuOptsList qcow2_runtime_opts = {
    .name = "qcow2",
    .head = QTAILQ_HEAD_INITIALIZER(qcow2_runtime_opts.head),
    .desc = {
        {
            .name = QCOW2_OPT_LAZY_REFCOUNTS,
            .type = QEMU_OPT_BOOL,
            .help = "Postpone refcount updates",
        },
        {
            .name = QCOW2_OPT_DISCARD_REQUEST,
            .type = QEMU_OPT_BOOL,
            .help = "Pass guest discard requests to the layer below",
        },
        {
            .name = QCOW2_OPT_DISCARD_SNAPSHOT,
            .type = QEMU_OPT_BOOL,
            .help = "Generate discard requests when snapshot related space "
                    "is freed",
        },
        {
            .name = QCOW2_OPT_DISCARD_OTHER,
            .type = QEMU_OPT_BOOL,
            .help = "Generate discard requests when other clusters are freed",
        },
        { /* end of list */ }
    },
};
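
/*
 * These runtime options are absorbed from the options QDict in qcow2_open()
 * below via qemu_opts_absorb_qdict(); the option name strings behind the
 * QCOW2_OPT_* macros live in the qcow2 header, not here. As an illustration
 * only (the exact spelling is whatever those macros expand to), lazy
 * refcounts would typically be enabled with a -drive parameter along the
 * lines of "lazy-refcounts=on".
 */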

static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
                      Error **errp)
{
    BDRVQcowState *s = bs->opaque;
    int len, i, ret = 0;
    QCowHeader header;
    QemuOpts *opts;
    Error *local_err = NULL;
    uint64_t ext_end;
    uint64_t l1_vm_state_index;

    ret = bdrv_pread(bs->file, 0, &header, sizeof(header));
    if (ret < 0) {
        goto fail;
    }
    be32_to_cpus(&header.magic);
    be32_to_cpus(&header.version);
    be64_to_cpus(&header.backing_file_offset);
    be32_to_cpus(&header.backing_file_size);
    be64_to_cpus(&header.size);
    be32_to_cpus(&header.cluster_bits);
    be32_to_cpus(&header.crypt_method);
    be64_to_cpus(&header.l1_table_offset);
    be32_to_cpus(&header.l1_size);
    be64_to_cpus(&header.refcount_table_offset);
    be32_to_cpus(&header.refcount_table_clusters);
    be64_to_cpus(&header.snapshots_offset);
    be32_to_cpus(&header.nb_snapshots);

    if (header.magic != QCOW_MAGIC) {
        ret = -EMEDIUMTYPE;
        goto fail;
    }
    if (header.version < 2 || header.version > 3) {
        report_unsupported(bs, "QCOW version %d", header.version);
        ret = -ENOTSUP;
        goto fail;
    }

    s->qcow_version = header.version;

    /* Initialise version 3 header fields */
    if (header.version == 2) {
        header.incompatible_features    = 0;
        header.compatible_features      = 0;
        header.autoclear_features       = 0;
        header.refcount_order           = 4;
        header.header_length            = 72;
    } else {
        be64_to_cpus(&header.incompatible_features);
        be64_to_cpus(&header.compatible_features);
        be64_to_cpus(&header.autoclear_features);
        be32_to_cpus(&header.refcount_order);
        be32_to_cpus(&header.header_length);
    }

    if (header.header_length > sizeof(header)) {
        s->unknown_header_fields_size = header.header_length - sizeof(header);
        s->unknown_header_fields = g_malloc(s->unknown_header_fields_size);
        ret = bdrv_pread(bs->file, sizeof(header), s->unknown_header_fields,
                         s->unknown_header_fields_size);
        if (ret < 0) {
            goto fail;
        }
    }

    if (header.backing_file_offset) {
        ext_end = header.backing_file_offset;
    } else {
        ext_end = 1 << header.cluster_bits;
    }

    /* Handle feature bits */
    s->incompatible_features    = header.incompatible_features;
    s->compatible_features      = header.compatible_features;
    s->autoclear_features       = header.autoclear_features;

    if (s->incompatible_features & ~QCOW2_INCOMPAT_MASK) {
        void *feature_table = NULL;
        qcow2_read_extensions(bs, header.header_length, ext_end,
                              &feature_table);
        report_unsupported_feature(bs, feature_table,
                                   s->incompatible_features &
                                   ~QCOW2_INCOMPAT_MASK);
        ret = -ENOTSUP;
        goto fail;
    }

    if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) {
        /* Corrupt images may not be written to unless they are being repaired
         */
        if ((flags & BDRV_O_RDWR) && !(flags & BDRV_O_CHECK)) {
            error_report("qcow2: Image is corrupt; cannot be opened "
                    "read/write.");
            ret = -EACCES;
            goto fail;
        }
    }

    /* Check support for various header values */
    if (header.refcount_order != 4) {
        report_unsupported(bs, "%d bit reference counts",
                           1 << header.refcount_order);
        ret = -ENOTSUP;
        goto fail;
    }
    s->refcount_order = header.refcount_order;

    if (header.cluster_bits < MIN_CLUSTER_BITS ||
        header.cluster_bits > MAX_CLUSTER_BITS) {
        ret = -EINVAL;
        goto fail;
    }
    if (header.crypt_method > QCOW_CRYPT_AES) {
        ret = -EINVAL;
        goto fail;
    }
    s->crypt_method_header = header.crypt_method;
    if (s->crypt_method_header) {
        bs->encrypted = 1;
    }
    s->cluster_bits = header.cluster_bits;
    s->cluster_size = 1 << s->cluster_bits;
    s->cluster_sectors = 1 << (s->cluster_bits - 9);
    s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */
    s->l2_size = 1 << s->l2_bits;
    bs->total_sectors = header.size / 512;
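    /*
     * csize_shift/csize_mask/cluster_offset_mask below describe how an L2
     * entry for a compressed cluster is split up: the low csize_shift bits
     * (shift = 62 - (cluster_bits - 8)) hold the host offset, and the
     * remaining cluster_bits - 8 bits hold the compressed size in 512-byte
     * sectors. This mirrors the qcow2 compressed cluster descriptor layout;
     * the actual decoding happens in the cluster handling code, not here.
     */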
    s->csize_shift = (62 - (s->cluster_bits - 8));
    s->csize_mask = (1 << (s->cluster_bits - 8)) - 1;
    s->cluster_offset_mask = (1LL << s->csize_shift) - 1;
    s->refcount_table_offset = header.refcount_table_offset;
    s->refcount_table_size =
        header.refcount_table_clusters << (s->cluster_bits - 3);

    s->snapshots_offset = header.snapshots_offset;
    s->nb_snapshots = header.nb_snapshots;

    /* read the level 1 table */
    s->l1_size = header.l1_size;

    l1_vm_state_index = size_to_l1(s, header.size);
    if (l1_vm_state_index > INT_MAX) {
        ret = -EFBIG;
        goto fail;
    }
    s->l1_vm_state_index = l1_vm_state_index;

    /* the L1 table must contain at least enough entries to cover
       header.size bytes */
    if (s->l1_size < s->l1_vm_state_index) {
        ret = -EINVAL;
        goto fail;
    }
    s->l1_table_offset = header.l1_table_offset;
    if (s->l1_size > 0) {
        s->l1_table = g_malloc0(
            align_offset(s->l1_size * sizeof(uint64_t), 512));
        ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table,
                         s->l1_size * sizeof(uint64_t));
        if (ret < 0) {
            goto fail;
        }
        for (i = 0; i < s->l1_size; i++) {
            be64_to_cpus(&s->l1_table[i]);
        }
    }

    /* alloc L2 table/refcount block cache */
    s->l2_table_cache = qcow2_cache_create(bs, L2_CACHE_SIZE);
    s->refcount_block_cache = qcow2_cache_create(bs, REFCOUNT_CACHE_SIZE);

    s->cluster_cache = g_malloc(s->cluster_size);
    /* one more sector for decompressed data alignment */
    s->cluster_data = qemu_blockalign(bs, QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size
                                  + 512);
    s->cluster_cache_offset = -1;
    s->flags = flags;

    ret = qcow2_refcount_init(bs);
    if (ret != 0) {
        goto fail;
    }

    QLIST_INIT(&s->cluster_allocs);
    QTAILQ_INIT(&s->discards);

    /* read qcow2 extensions */
    if (qcow2_read_extensions(bs, header.header_length, ext_end, NULL)) {
        ret = -EINVAL;
        goto fail;
    }

    /* read the backing file name */
    if (header.backing_file_offset != 0) {
        len = header.backing_file_size;
        if (len > 1023) {
            len = 1023;
        }
        ret = bdrv_pread(bs->file, header.backing_file_offset,
                         bs->backing_file, len);
        if (ret < 0) {
            goto fail;
        }
        bs->backing_file[len] = '\0';
    }

    ret = qcow2_read_snapshots(bs);
    if (ret < 0) {
        goto fail;
    }

    /* Clear unknown autoclear feature bits */
    if (!bs->read_only && s->autoclear_features != 0) {
        s->autoclear_features = 0;
        ret = qcow2_update_header(bs);
        if (ret < 0) {
            goto fail;
        }
    }

    /* Initialise locks */
    qemu_co_mutex_init(&s->lock);

    /* Repair image if dirty */
    if (!(flags & BDRV_O_CHECK) && !bs->read_only &&
        (s->incompatible_features & QCOW2_INCOMPAT_DIRTY)) {
        BdrvCheckResult result = {0};

        ret = qcow2_check(bs, &result, BDRV_FIX_ERRORS);
        if (ret < 0) {
            goto fail;
        }
    }

    /* Enable lazy_refcounts according to image and command line options */
    opts = qemu_opts_create_nofail(&qcow2_runtime_opts);
    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (error_is_set(&local_err)) {
        qerror_report_err(local_err);
        error_free(local_err);
        ret = -EINVAL;
        goto fail;
    }

    s->use_lazy_refcounts = qemu_opt_get_bool(opts, QCOW2_OPT_LAZY_REFCOUNTS,
        (s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS));

    s->discard_passthrough[QCOW2_DISCARD_NEVER] = false;
    s->discard_passthrough[QCOW2_DISCARD_ALWAYS] = true;
    s->discard_passthrough[QCOW2_DISCARD_REQUEST] =
        qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_REQUEST,
                          flags & BDRV_O_UNMAP);
    s->discard_passthrough[QCOW2_DISCARD_SNAPSHOT] =
        qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_SNAPSHOT, true);
    s->discard_passthrough[QCOW2_DISCARD_OTHER] =
        qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_OTHER, false);

    qemu_opts_del(opts);

    if (s->use_lazy_refcounts && s->qcow_version < 3) {
        qerror_report(ERROR_CLASS_GENERIC_ERROR, "Lazy refcounts require "
            "a qcow2 image with at least qemu 1.1 compatibility level");
        ret = -EINVAL;
        goto fail;
    }

#ifdef DEBUG_ALLOC
    {
        BdrvCheckResult result = {0};
        qcow2_check_refcounts(bs, &result, 0);
    }
#endif
    return ret;

 fail:
    g_free(s->unknown_header_fields);
    cleanup_unknown_header_ext(bs);
    qcow2_free_snapshots(bs);
    qcow2_refcount_close(bs);
    g_free(s->l1_table);
    /* else pre-write overlap checks in cache_destroy may crash */
    s->l1_table = NULL;
    if (s->l2_table_cache) {
        qcow2_cache_destroy(bs, s->l2_table_cache);
    }
    g_free(s->cluster_cache);
    qemu_vfree(s->cluster_data);
    return ret;
}

static int qcow2_set_key(BlockDriverState *bs, const char *key)
{
    BDRVQcowState *s = bs->opaque;
    uint8_t keybuf[16];
    int len, i;

    memset(keybuf, 0, 16);
    len = strlen(key);
    if (len > 16)
        len = 16;
    /* XXX: we could compress the chars to 7 bits to increase
       entropy */
    for (i = 0; i < len; i++) {
        keybuf[i] = key[i];
    }
    s->crypt_method = s->crypt_method_header;

    if (AES_set_encrypt_key(keybuf, 128, &s->aes_encrypt_key) != 0)
        return -1;
    if (AES_set_decrypt_key(keybuf, 128, &s->aes_decrypt_key) != 0)
        return -1;
#if 0
    /* test */
    {
        uint8_t in[16];
        uint8_t out[16];
        uint8_t tmp[16];
        for (i = 0; i < 16; i++)
            in[i] = i;
        AES_encrypt(in, tmp, &s->aes_encrypt_key);
        AES_decrypt(tmp, out, &s->aes_decrypt_key);
        for (i = 0; i < 16; i++)
            printf(" %02x", tmp[i]);
        printf("\n");
        for (i = 0; i < 16; i++)
            printf(" %02x", out[i]);
        printf("\n");
    }
#endif
    return 0;
}

/* We have nothing to do for QCOW2 reopen; the stubs just return
 * success */
static int qcow2_reopen_prepare(BDRVReopenState *state,
                                BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static int64_t coroutine_fn qcow2_co_get_block_status(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, int *pnum)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t cluster_offset;
    int index_in_cluster, ret;
    int64_t status = 0;

    *pnum = nb_sectors;
    qemu_co_mutex_lock(&s->lock);
    ret = qcow2_get_cluster_offset(bs, sector_num << 9, pnum, &cluster_offset);
    qemu_co_mutex_unlock(&s->lock);
    if (ret < 0) {
        return ret;
    }

    if (cluster_offset != 0 && ret != QCOW2_CLUSTER_COMPRESSED &&
        !s->crypt_method) {
        index_in_cluster = sector_num & (s->cluster_sectors - 1);
        cluster_offset |= (index_in_cluster << BDRV_SECTOR_BITS);
        status |= BDRV_BLOCK_OFFSET_VALID | cluster_offset;
    }
    if (ret == QCOW2_CLUSTER_ZERO) {
        status |= BDRV_BLOCK_ZERO;
    } else if (ret != QCOW2_CLUSTER_UNALLOCATED) {
        status |= BDRV_BLOCK_DATA;
    }
    return status;
}

/* handle reading after the end of the backing file */
int qcow2_backing_read1(BlockDriverState *bs, QEMUIOVector *qiov,
                  int64_t sector_num, int nb_sectors)
{
    int n1;
    if ((sector_num + nb_sectors) <= bs->total_sectors)
        return nb_sectors;
    if (sector_num >= bs->total_sectors)
        n1 = 0;
    else
        n1 = bs->total_sectors - sector_num;

    qemu_iovec_memset(qiov, 512 * n1, 0, 512 * (nb_sectors - n1));

    return n1;
}
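
/*
 * Worked example for qcow2_backing_read1() above: with bs->total_sectors
 * == 100, a request for sectors 95..104 (nb_sectors == 10) returns n1 == 5;
 * sectors 95..99 are then read from the backing file by the caller, while
 * the remaining 5 * 512 bytes of the iovec have already been zero-filled
 * here.
 */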

static coroutine_fn int qcow2_co_readv(BlockDriverState *bs, int64_t sector_num,
                          int remaining_sectors, QEMUIOVector *qiov)
{
    BDRVQcowState *s = bs->opaque;
    int index_in_cluster, n1;
    int ret;
    int cur_nr_sectors; /* number of sectors in current iteration */
    uint64_t cluster_offset = 0;
    uint64_t bytes_done = 0;
    QEMUIOVector hd_qiov;
    uint8_t *cluster_data = NULL;

    qemu_iovec_init(&hd_qiov, qiov->niov);

    qemu_co_mutex_lock(&s->lock);

    while (remaining_sectors != 0) {

        /* prepare next request */
        cur_nr_sectors = remaining_sectors;
        if (s->crypt_method) {
            cur_nr_sectors = MIN(cur_nr_sectors,
                QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors);
        }

        ret = qcow2_get_cluster_offset(bs, sector_num << 9,
            &cur_nr_sectors, &cluster_offset);
        if (ret < 0) {
            goto fail;
        }

        index_in_cluster = sector_num & (s->cluster_sectors - 1);

        qemu_iovec_reset(&hd_qiov);
        qemu_iovec_concat(&hd_qiov, qiov, bytes_done,
            cur_nr_sectors * 512);

        switch (ret) {
        case QCOW2_CLUSTER_UNALLOCATED:

            if (bs->backing_hd) {
                /* read from the base image */
                n1 = qcow2_backing_read1(bs->backing_hd, &hd_qiov,
                    sector_num, cur_nr_sectors);
                if (n1 > 0) {
                    BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
                    qemu_co_mutex_unlock(&s->lock);
                    ret = bdrv_co_readv(bs->backing_hd, sector_num,
                                        n1, &hd_qiov);
                    qemu_co_mutex_lock(&s->lock);
                    if (ret < 0) {
                        goto fail;
                    }
                }
            } else {
                /* Note: in this case, no need to wait */
                qemu_iovec_memset(&hd_qiov, 0, 0, 512 * cur_nr_sectors);
            }
            break;

        case QCOW2_CLUSTER_ZERO:
            qemu_iovec_memset(&hd_qiov, 0, 0, 512 * cur_nr_sectors);
            break;

        case QCOW2_CLUSTER_COMPRESSED:
            /* add AIO support for compressed blocks ? */
            ret = qcow2_decompress_cluster(bs, cluster_offset);
            if (ret < 0) {
                goto fail;
            }

            qemu_iovec_from_buf(&hd_qiov, 0,
                s->cluster_cache + index_in_cluster * 512,
                512 * cur_nr_sectors);
            break;

        case QCOW2_CLUSTER_NORMAL:
            if ((cluster_offset & 511) != 0) {
                ret = -EIO;
                goto fail;
            }

            if (s->crypt_method) {
                /*
                 * For encrypted images, read everything into a temporary
                 * contiguous buffer on which the AES functions can work.
                 */
                if (!cluster_data) {
                    cluster_data =
                        qemu_blockalign(bs, QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
                }

                assert(cur_nr_sectors <=
                    QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors);
                qemu_iovec_reset(&hd_qiov);
                qemu_iovec_add(&hd_qiov, cluster_data,
                    512 * cur_nr_sectors);
            }

            BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
            qemu_co_mutex_unlock(&s->lock);
            ret = bdrv_co_readv(bs->file,
                                (cluster_offset >> 9) + index_in_cluster,
                                cur_nr_sectors, &hd_qiov);
            qemu_co_mutex_lock(&s->lock);
            if (ret < 0) {
                goto fail;
            }
            if (s->crypt_method) {
                qcow2_encrypt_sectors(s, sector_num, cluster_data,
                    cluster_data, cur_nr_sectors, 0, &s->aes_decrypt_key);
                qemu_iovec_from_buf(qiov, bytes_done,
                    cluster_data, 512 * cur_nr_sectors);
            }
            break;

        default:
            g_assert_not_reached();
            ret = -EIO;
            goto fail;
        }

        remaining_sectors -= cur_nr_sectors;
        sector_num += cur_nr_sectors;
        bytes_done += cur_nr_sectors * 512;
    }
    ret = 0;

fail:
    qemu_co_mutex_unlock(&s->lock);

    qemu_iovec_destroy(&hd_qiov);
    qemu_vfree(cluster_data);

    return ret;
}

static coroutine_fn int qcow2_co_writev(BlockDriverState *bs,
                           int64_t sector_num,
                           int remaining_sectors,
                           QEMUIOVector *qiov)
{
    BDRVQcowState *s = bs->opaque;
    int index_in_cluster;
    int n_end;
    int ret;
    int cur_nr_sectors; /* number of sectors in current iteration */
    uint64_t cluster_offset;
    QEMUIOVector hd_qiov;
    uint64_t bytes_done = 0;
    uint8_t *cluster_data = NULL;
    QCowL2Meta *l2meta = NULL;

    trace_qcow2_writev_start_req(qemu_coroutine_self(), sector_num,
                                 remaining_sectors);

    qemu_iovec_init(&hd_qiov, qiov->niov);

    s->cluster_cache_offset = -1; /* disable compressed cache */

    qemu_co_mutex_lock(&s->lock);

    while (remaining_sectors != 0) {

        l2meta = NULL;

        trace_qcow2_writev_start_part(qemu_coroutine_self());
        index_in_cluster = sector_num & (s->cluster_sectors - 1);
        n_end = index_in_cluster + remaining_sectors;
        if (s->crypt_method &&
            n_end > QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors) {
            n_end = QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors;
        }

        ret = qcow2_alloc_cluster_offset(bs, sector_num << 9,
            index_in_cluster, n_end, &cur_nr_sectors, &cluster_offset, &l2meta);
        if (ret < 0) {
            goto fail;
        }

        assert((cluster_offset & 511) == 0);

        qemu_iovec_reset(&hd_qiov);
        qemu_iovec_concat(&hd_qiov, qiov, bytes_done,
            cur_nr_sectors * 512);

        if (s->crypt_method) {
            if (!cluster_data) {
                cluster_data = qemu_blockalign(bs, QCOW_MAX_CRYPT_CLUSTERS *
                                                 s->cluster_size);
            }

            assert(hd_qiov.size <=
                   QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
            qemu_iovec_to_buf(&hd_qiov, 0, cluster_data, hd_qiov.size);

            qcow2_encrypt_sectors(s, sector_num, cluster_data,
                cluster_data, cur_nr_sectors, 1, &s->aes_encrypt_key);

            qemu_iovec_reset(&hd_qiov);
            qemu_iovec_add(&hd_qiov, cluster_data,
                cur_nr_sectors * 512);
        }

        ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_DEFAULT,
                cluster_offset + index_in_cluster * BDRV_SECTOR_SIZE,
                cur_nr_sectors * BDRV_SECTOR_SIZE);
        if (ret < 0) {
            goto fail;
        }

        qemu_co_mutex_unlock(&s->lock);
        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
        trace_qcow2_writev_data(qemu_coroutine_self(),
                                (cluster_offset >> 9) + index_in_cluster);
        ret = bdrv_co_writev(bs->file,
                             (cluster_offset >> 9) + index_in_cluster,
                             cur_nr_sectors, &hd_qiov);
        qemu_co_mutex_lock(&s->lock);
        if (ret < 0) {
            goto fail;
        }

        while (l2meta != NULL) {
            QCowL2Meta *next;

            ret = qcow2_alloc_cluster_link_l2(bs, l2meta);
            if (ret < 0) {
                goto fail;
            }

            /* Take the request off the list of running requests */
            if (l2meta->nb_clusters != 0) {
                QLIST_REMOVE(l2meta, next_in_flight);
            }

            qemu_co_queue_restart_all(&l2meta->dependent_requests);

            next = l2meta->next;
            g_free(l2meta);
            l2meta = next;
        }

        remaining_sectors -= cur_nr_sectors;
        sector_num += cur_nr_sectors;
        bytes_done += cur_nr_sectors * 512;
        trace_qcow2_writev_done_part(qemu_coroutine_self(), cur_nr_sectors);
    }
    ret = 0;

fail:
    qemu_co_mutex_unlock(&s->lock);

    while (l2meta != NULL) {
        QCowL2Meta *next;

        if (l2meta->nb_clusters != 0) {
            QLIST_REMOVE(l2meta, next_in_flight);
        }
        qemu_co_queue_restart_all(&l2meta->dependent_requests);

        next = l2meta->next;
        g_free(l2meta);
        l2meta = next;
    }

    qemu_iovec_destroy(&hd_qiov);
    qemu_vfree(cluster_data);
    trace_qcow2_writev_done_req(qemu_coroutine_self(), ret);

    return ret;
}
1012 585f8587 bellard
1013 7c80ab3f Jes Sorensen
static void qcow2_close(BlockDriverState *bs)
1014 585f8587 bellard
{
1015 585f8587 bellard
    BDRVQcowState *s = bs->opaque;
1016 7267c094 Anthony Liguori
    g_free(s->l1_table);
1017 cf93980e Max Reitz
    /* else pre-write overlap checks in cache_destroy may crash */
1018 cf93980e Max Reitz
    s->l1_table = NULL;
1019 29c1a730 Kevin Wolf
1020 29c1a730 Kevin Wolf
    qcow2_cache_flush(bs, s->l2_table_cache);
1021 29c1a730 Kevin Wolf
    qcow2_cache_flush(bs, s->refcount_block_cache);
1022 29c1a730 Kevin Wolf
1023 c61d0004 Stefan Hajnoczi
    qcow2_mark_clean(bs);
1024 c61d0004 Stefan Hajnoczi
1025 29c1a730 Kevin Wolf
    qcow2_cache_destroy(bs, s->l2_table_cache);
1026 29c1a730 Kevin Wolf
    qcow2_cache_destroy(bs, s->refcount_block_cache);
1027 29c1a730 Kevin Wolf
1028 6744cbab Kevin Wolf
    g_free(s->unknown_header_fields);
1029 75bab85c Kevin Wolf
    cleanup_unknown_header_ext(bs);
1030 6744cbab Kevin Wolf
1031 7267c094 Anthony Liguori
    g_free(s->cluster_cache);
1032 dea43a65 Frediano Ziglio
    qemu_vfree(s->cluster_data);
1033 ed6ccf0f Kevin Wolf
    qcow2_refcount_close(bs);
1034 28c1202b Li Zhi Hui
    qcow2_free_snapshots(bs);
1035 585f8587 bellard
}
1036 585f8587 bellard
1037 06d9260f Anthony Liguori
static void qcow2_invalidate_cache(BlockDriverState *bs)
1038 06d9260f Anthony Liguori
{
1039 06d9260f Anthony Liguori
    BDRVQcowState *s = bs->opaque;
1040 06d9260f Anthony Liguori
    int flags = s->flags;
1041 06d9260f Anthony Liguori
    AES_KEY aes_encrypt_key;
1042 06d9260f Anthony Liguori
    AES_KEY aes_decrypt_key;
1043 06d9260f Anthony Liguori
    uint32_t crypt_method = 0;
1044 acdfb480 Kevin Wolf
    QDict *options;
1045 06d9260f Anthony Liguori
1046 06d9260f Anthony Liguori
    /*
1047 06d9260f Anthony Liguori
     * Backing files are read-only which makes all of their metadata immutable,
1048 06d9260f Anthony Liguori
     * that means we don't have to worry about reopening them here.
1049 06d9260f Anthony Liguori
     */
1050 06d9260f Anthony Liguori
1051 06d9260f Anthony Liguori
    if (s->crypt_method) {
1052 06d9260f Anthony Liguori
        crypt_method = s->crypt_method;
1053 06d9260f Anthony Liguori
        memcpy(&aes_encrypt_key, &s->aes_encrypt_key, sizeof(aes_encrypt_key));
1054 06d9260f Anthony Liguori
        memcpy(&aes_decrypt_key, &s->aes_decrypt_key, sizeof(aes_decrypt_key));
1055 06d9260f Anthony Liguori
    }
1056 06d9260f Anthony Liguori
1057 06d9260f Anthony Liguori
    qcow2_close(bs);
1058 06d9260f Anthony Liguori
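    /* Reopen with the same lazy refcounts setting that was in effect before */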
1059 acdfb480 Kevin Wolf
    options = qdict_new();
1060 acdfb480 Kevin Wolf
    qdict_put(options, QCOW2_OPT_LAZY_REFCOUNTS,
1061 acdfb480 Kevin Wolf
              qbool_from_int(s->use_lazy_refcounts));
1062 acdfb480 Kevin Wolf
1063 06d9260f Anthony Liguori
    memset(s, 0, sizeof(BDRVQcowState));
1064 015a1036 Max Reitz
    qcow2_open(bs, options, flags, NULL);
1065 acdfb480 Kevin Wolf
1066 acdfb480 Kevin Wolf
    QDECREF(options);
1067 06d9260f Anthony Liguori
1068 06d9260f Anthony Liguori
    if (crypt_method) {
1069 06d9260f Anthony Liguori
        s->crypt_method = crypt_method;
1070 06d9260f Anthony Liguori
        memcpy(&s->aes_encrypt_key, &aes_encrypt_key, sizeof(aes_encrypt_key));
1071 06d9260f Anthony Liguori
        memcpy(&s->aes_decrypt_key, &aes_decrypt_key, sizeof(aes_decrypt_key));
1072 06d9260f Anthony Liguori
    }
1073 06d9260f Anthony Liguori
}
1074 06d9260f Anthony Liguori
1075 e24e49e6 Kevin Wolf
static int header_ext_add(char *buf, uint32_t magic, const void *s,
1076 e24e49e6 Kevin Wolf
    size_t len, size_t buflen)
1077 e24e49e6 Kevin Wolf
{
1078 e24e49e6 Kevin Wolf
    QCowExtension *ext_backing_fmt = (QCowExtension*) buf;
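    /* The extension payload is padded to a multiple of 8 bytes, as required
     * by the qcow2 header extension format */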
1079 e24e49e6 Kevin Wolf
    size_t ext_len = sizeof(QCowExtension) + ((len + 7) & ~7);
1080 e24e49e6 Kevin Wolf
1081 e24e49e6 Kevin Wolf
    if (buflen < ext_len) {
1082 e24e49e6 Kevin Wolf
        return -ENOSPC;
1083 e24e49e6 Kevin Wolf
    }
1084 e24e49e6 Kevin Wolf
1085 e24e49e6 Kevin Wolf
    *ext_backing_fmt = (QCowExtension) {
1086 e24e49e6 Kevin Wolf
        .magic  = cpu_to_be32(magic),
1087 e24e49e6 Kevin Wolf
        .len    = cpu_to_be32(len),
1088 e24e49e6 Kevin Wolf
    };
1089 e24e49e6 Kevin Wolf
    memcpy(buf + sizeof(QCowExtension), s, len);
1090 e24e49e6 Kevin Wolf
1091 e24e49e6 Kevin Wolf
    return ext_len;
1092 e24e49e6 Kevin Wolf
}
1093 e24e49e6 Kevin Wolf
1094 756e6736 Kevin Wolf
/*
1095 e24e49e6 Kevin Wolf
 * Updates the qcow2 header, including the variable length parts of it, i.e.
1096 e24e49e6 Kevin Wolf
 * the backing file name and all extensions. qcow2 was not designed to allow
1097 e24e49e6 Kevin Wolf
 * such changes, so if we run out of space (we can only use the first cluster)
1098 e24e49e6 Kevin Wolf
 * this function may fail.
1099 756e6736 Kevin Wolf
 *
1100 756e6736 Kevin Wolf
 * Returns 0 on success, -errno in error cases.
1101 756e6736 Kevin Wolf
 */
1102 e24e49e6 Kevin Wolf
int qcow2_update_header(BlockDriverState *bs)
1103 756e6736 Kevin Wolf
{
1104 756e6736 Kevin Wolf
    BDRVQcowState *s = bs->opaque;
1105 e24e49e6 Kevin Wolf
    QCowHeader *header;
1106 e24e49e6 Kevin Wolf
    char *buf;
1107 e24e49e6 Kevin Wolf
    size_t buflen = s->cluster_size;
1108 756e6736 Kevin Wolf
    int ret;
1109 e24e49e6 Kevin Wolf
    uint64_t total_size;
1110 e24e49e6 Kevin Wolf
    uint32_t refcount_table_clusters;
1111 6744cbab Kevin Wolf
    size_t header_length;
1112 75bab85c Kevin Wolf
    Qcow2UnknownHeaderExtension *uext;
1113 756e6736 Kevin Wolf
1114 e24e49e6 Kevin Wolf
    buf = qemu_blockalign(bs, buflen);
1115 756e6736 Kevin Wolf
1116 e24e49e6 Kevin Wolf
    /* Header structure */
1117 e24e49e6 Kevin Wolf
    header = (QCowHeader*) buf;
1118 756e6736 Kevin Wolf
1119 e24e49e6 Kevin Wolf
    if (buflen < sizeof(*header)) {
1120 e24e49e6 Kevin Wolf
        ret = -ENOSPC;
1121 e24e49e6 Kevin Wolf
        goto fail;
1122 756e6736 Kevin Wolf
    }
1123 756e6736 Kevin Wolf
1124 6744cbab Kevin Wolf
    header_length = sizeof(*header) + s->unknown_header_fields_size;
1125 e24e49e6 Kevin Wolf
    total_size = bs->total_sectors * BDRV_SECTOR_SIZE;
1126 e24e49e6 Kevin Wolf
    refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3);
1127 e24e49e6 Kevin Wolf
1128 e24e49e6 Kevin Wolf
    *header = (QCowHeader) {
1129 6744cbab Kevin Wolf
        /* Version 2 fields */
1130 e24e49e6 Kevin Wolf
        .magic                  = cpu_to_be32(QCOW_MAGIC),
1131 6744cbab Kevin Wolf
        .version                = cpu_to_be32(s->qcow_version),
1132 e24e49e6 Kevin Wolf
        .backing_file_offset    = 0,
1133 e24e49e6 Kevin Wolf
        .backing_file_size      = 0,
1134 e24e49e6 Kevin Wolf
        .cluster_bits           = cpu_to_be32(s->cluster_bits),
1135 e24e49e6 Kevin Wolf
        .size                   = cpu_to_be64(total_size),
1136 e24e49e6 Kevin Wolf
        .crypt_method           = cpu_to_be32(s->crypt_method_header),
1137 e24e49e6 Kevin Wolf
        .l1_size                = cpu_to_be32(s->l1_size),
1138 e24e49e6 Kevin Wolf
        .l1_table_offset        = cpu_to_be64(s->l1_table_offset),
1139 e24e49e6 Kevin Wolf
        .refcount_table_offset  = cpu_to_be64(s->refcount_table_offset),
1140 e24e49e6 Kevin Wolf
        .refcount_table_clusters = cpu_to_be32(refcount_table_clusters),
1141 e24e49e6 Kevin Wolf
        .nb_snapshots           = cpu_to_be32(s->nb_snapshots),
1142 e24e49e6 Kevin Wolf
        .snapshots_offset       = cpu_to_be64(s->snapshots_offset),
1143 6744cbab Kevin Wolf
1144 6744cbab Kevin Wolf
        /* Version 3 fields */
1145 6744cbab Kevin Wolf
        .incompatible_features  = cpu_to_be64(s->incompatible_features),
1146 6744cbab Kevin Wolf
        .compatible_features    = cpu_to_be64(s->compatible_features),
1147 6744cbab Kevin Wolf
        .autoclear_features     = cpu_to_be64(s->autoclear_features),
1148 b6481f37 Max Reitz
        .refcount_order         = cpu_to_be32(s->refcount_order),
1149 6744cbab Kevin Wolf
        .header_length          = cpu_to_be32(header_length),
1150 e24e49e6 Kevin Wolf
    };
1151 756e6736 Kevin Wolf
1152 6744cbab Kevin Wolf
    /* For older versions, write a shorter header */
1153 6744cbab Kevin Wolf
    switch (s->qcow_version) {
1154 6744cbab Kevin Wolf
    case 2:
1155 6744cbab Kevin Wolf
        ret = offsetof(QCowHeader, incompatible_features);
1156 6744cbab Kevin Wolf
        break;
1157 6744cbab Kevin Wolf
    case 3:
1158 6744cbab Kevin Wolf
        ret = sizeof(*header);
1159 6744cbab Kevin Wolf
        break;
1160 6744cbab Kevin Wolf
    default:
1161 b6c14762 Jim Meyering
        ret = -EINVAL;
1162 b6c14762 Jim Meyering
        goto fail;
1163 6744cbab Kevin Wolf
    }
1164 6744cbab Kevin Wolf
1165 6744cbab Kevin Wolf
    buf += ret;
1166 6744cbab Kevin Wolf
    buflen -= ret;
1167 6744cbab Kevin Wolf
    memset(buf, 0, buflen);
1168 6744cbab Kevin Wolf
1169 6744cbab Kevin Wolf
    /* Preserve any unknown field in the header */
1170 6744cbab Kevin Wolf
    if (s->unknown_header_fields_size) {
1171 6744cbab Kevin Wolf
        if (buflen < s->unknown_header_fields_size) {
1172 6744cbab Kevin Wolf
            ret = -ENOSPC;
1173 6744cbab Kevin Wolf
            goto fail;
1174 6744cbab Kevin Wolf
        }
1175 6744cbab Kevin Wolf
1176 6744cbab Kevin Wolf
        memcpy(buf, s->unknown_header_fields, s->unknown_header_fields_size);
1177 6744cbab Kevin Wolf
        buf += s->unknown_header_fields_size;
1178 6744cbab Kevin Wolf
        buflen -= s->unknown_header_fields_size;
1179 6744cbab Kevin Wolf
    }
1180 756e6736 Kevin Wolf
1181 e24e49e6 Kevin Wolf
    /* Backing file format header extension */
1182 e24e49e6 Kevin Wolf
    if (*bs->backing_format) {
1183 e24e49e6 Kevin Wolf
        ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BACKING_FORMAT,
1184 e24e49e6 Kevin Wolf
                             bs->backing_format, strlen(bs->backing_format),
1185 e24e49e6 Kevin Wolf
                             buflen);
1186 e24e49e6 Kevin Wolf
        if (ret < 0) {
1187 e24e49e6 Kevin Wolf
            goto fail;
1188 756e6736 Kevin Wolf
        }
1189 756e6736 Kevin Wolf
1190 e24e49e6 Kevin Wolf
        buf += ret;
1191 e24e49e6 Kevin Wolf
        buflen -= ret;
1192 756e6736 Kevin Wolf
    }
1193 756e6736 Kevin Wolf
1194 cfcc4c62 Kevin Wolf
    /* Feature table */
1195 cfcc4c62 Kevin Wolf
    Qcow2Feature features[] = {
1196 c61d0004 Stefan Hajnoczi
        {
1197 c61d0004 Stefan Hajnoczi
            .type = QCOW2_FEAT_TYPE_INCOMPATIBLE,
1198 c61d0004 Stefan Hajnoczi
            .bit  = QCOW2_INCOMPAT_DIRTY_BITNR,
1199 c61d0004 Stefan Hajnoczi
            .name = "dirty bit",
1200 c61d0004 Stefan Hajnoczi
        },
1201 bfe8043e Stefan Hajnoczi
        {
1202 69c98726 Max Reitz
            .type = QCOW2_FEAT_TYPE_INCOMPATIBLE,
1203 69c98726 Max Reitz
            .bit  = QCOW2_INCOMPAT_CORRUPT_BITNR,
1204 69c98726 Max Reitz
            .name = "corrupt bit",
1205 69c98726 Max Reitz
        },
1206 69c98726 Max Reitz
        {
1207 bfe8043e Stefan Hajnoczi
            .type = QCOW2_FEAT_TYPE_COMPATIBLE,
1208 bfe8043e Stefan Hajnoczi
            .bit  = QCOW2_COMPAT_LAZY_REFCOUNTS_BITNR,
1209 bfe8043e Stefan Hajnoczi
            .name = "lazy refcounts",
1210 bfe8043e Stefan Hajnoczi
        },
1211 cfcc4c62 Kevin Wolf
    };
1212 cfcc4c62 Kevin Wolf
1213 cfcc4c62 Kevin Wolf
    ret = header_ext_add(buf, QCOW2_EXT_MAGIC_FEATURE_TABLE,
1214 cfcc4c62 Kevin Wolf
                         features, sizeof(features), buflen);
1215 cfcc4c62 Kevin Wolf
    if (ret < 0) {
1216 cfcc4c62 Kevin Wolf
        goto fail;
1217 cfcc4c62 Kevin Wolf
    }
1218 cfcc4c62 Kevin Wolf
    buf += ret;
1219 cfcc4c62 Kevin Wolf
    buflen -= ret;
1220 cfcc4c62 Kevin Wolf
1221 75bab85c Kevin Wolf
    /* Keep unknown header extensions */
1222 75bab85c Kevin Wolf
    QLIST_FOREACH(uext, &s->unknown_header_ext, next) {
1223 75bab85c Kevin Wolf
        ret = header_ext_add(buf, uext->magic, uext->data, uext->len, buflen);
1224 75bab85c Kevin Wolf
        if (ret < 0) {
1225 75bab85c Kevin Wolf
            goto fail;
1226 75bab85c Kevin Wolf
        }
1227 75bab85c Kevin Wolf
1228 75bab85c Kevin Wolf
        buf += ret;
1229 75bab85c Kevin Wolf
        buflen -= ret;
1230 75bab85c Kevin Wolf
    }
1231 75bab85c Kevin Wolf
1232 e24e49e6 Kevin Wolf
    /* End of header extensions */
1233 e24e49e6 Kevin Wolf
    ret = header_ext_add(buf, QCOW2_EXT_MAGIC_END, NULL, 0, buflen);
1234 756e6736 Kevin Wolf
    if (ret < 0) {
1235 756e6736 Kevin Wolf
        goto fail;
1236 756e6736 Kevin Wolf
    }
1237 756e6736 Kevin Wolf
1238 e24e49e6 Kevin Wolf
    buf += ret;
1239 e24e49e6 Kevin Wolf
    buflen -= ret;
1240 756e6736 Kevin Wolf
1241 e24e49e6 Kevin Wolf
    /* Backing file name */
1242 e24e49e6 Kevin Wolf
    if (*bs->backing_file) {
1243 e24e49e6 Kevin Wolf
        size_t backing_file_len = strlen(bs->backing_file);
1244 e24e49e6 Kevin Wolf
1245 e24e49e6 Kevin Wolf
        if (buflen < backing_file_len) {
1246 e24e49e6 Kevin Wolf
            ret = -ENOSPC;
1247 e24e49e6 Kevin Wolf
            goto fail;
1248 e24e49e6 Kevin Wolf
        }
1249 e24e49e6 Kevin Wolf
1250 00ea1881 Jim Meyering
        /* Using strncpy is ok here, since buf need not be NUL-terminated. */
1251 e24e49e6 Kevin Wolf
        strncpy(buf, bs->backing_file, buflen);
1252 e24e49e6 Kevin Wolf
1253 e24e49e6 Kevin Wolf
        header->backing_file_offset = cpu_to_be64(buf - ((char*) header));
1254 e24e49e6 Kevin Wolf
        header->backing_file_size   = cpu_to_be32(backing_file_len);
1255 756e6736 Kevin Wolf
    }
1256 756e6736 Kevin Wolf
1257 e24e49e6 Kevin Wolf
    /* Write the new header */
1258 e24e49e6 Kevin Wolf
    ret = bdrv_pwrite(bs->file, 0, header, s->cluster_size);
1259 756e6736 Kevin Wolf
    if (ret < 0) {
1260 756e6736 Kevin Wolf
        goto fail;
1261 756e6736 Kevin Wolf
    }
1262 756e6736 Kevin Wolf
1263 756e6736 Kevin Wolf
    ret = 0;
1264 756e6736 Kevin Wolf
fail:
1265 e24e49e6 Kevin Wolf
    qemu_vfree(header);
1266 756e6736 Kevin Wolf
    return ret;
1267 756e6736 Kevin Wolf
}
1268 756e6736 Kevin Wolf
1269 756e6736 Kevin Wolf
static int qcow2_change_backing_file(BlockDriverState *bs,
1270 756e6736 Kevin Wolf
    const char *backing_file, const char *backing_fmt)
1271 756e6736 Kevin Wolf
{
1272 e24e49e6 Kevin Wolf
    pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
1273 e24e49e6 Kevin Wolf
    pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
1274 e24e49e6 Kevin Wolf
1275 e24e49e6 Kevin Wolf
    return qcow2_update_header(bs);
1276 756e6736 Kevin Wolf
}
1277 756e6736 Kevin Wolf
1278 a35e1c17 Kevin Wolf
static int preallocate(BlockDriverState *bs)
1279 a35e1c17 Kevin Wolf
{
1280 a35e1c17 Kevin Wolf
    uint64_t nb_sectors;
1281 a35e1c17 Kevin Wolf
    uint64_t offset;
1282 060bee89 Kevin Wolf
    uint64_t host_offset = 0;
1283 a35e1c17 Kevin Wolf
    int num;
1284 148da7ea Kevin Wolf
    int ret;
1285 f50f88b9 Kevin Wolf
    QCowL2Meta *meta;
1286 a35e1c17 Kevin Wolf
1287 a35e1c17 Kevin Wolf
    nb_sectors = bdrv_getlength(bs) >> 9;
1288 a35e1c17 Kevin Wolf
    offset = 0;
1289 a35e1c17 Kevin Wolf
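    /* Allocate and map clusters for the whole virtual disk; only metadata is
     * written here, the data clusters themselves are left untouched */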
1290 a35e1c17 Kevin Wolf
    while (nb_sectors) {
1291 a35e1c17 Kevin Wolf
        num = MIN(nb_sectors, INT_MAX >> 9);
1292 060bee89 Kevin Wolf
        ret = qcow2_alloc_cluster_offset(bs, offset, 0, num, &num,
1293 060bee89 Kevin Wolf
                                         &host_offset, &meta);
1294 148da7ea Kevin Wolf
        if (ret < 0) {
1295 19dbcbf7 Kevin Wolf
            return ret;
1296 a35e1c17 Kevin Wolf
        }
1297 a35e1c17 Kevin Wolf
1298 f50f88b9 Kevin Wolf
        ret = qcow2_alloc_cluster_link_l2(bs, meta);
1299 19dbcbf7 Kevin Wolf
        if (ret < 0) {
1300 6cfcb9b8 Kevin Wolf
            qcow2_free_any_clusters(bs, meta->alloc_offset, meta->nb_clusters,
1301 6cfcb9b8 Kevin Wolf
                                    QCOW2_DISCARD_NEVER);
1302 19dbcbf7 Kevin Wolf
            return ret;
1303 a35e1c17 Kevin Wolf
        }
1304 a35e1c17 Kevin Wolf
1305 f214978a Kevin Wolf
        /* There are no dependent requests, but we need to remove our request
1306 f214978a Kevin Wolf
         * from the list of in-flight requests */
1307 f50f88b9 Kevin Wolf
        if (meta != NULL) {
1308 4e95314e Kevin Wolf
            QLIST_REMOVE(meta, next_in_flight);
1309 f50f88b9 Kevin Wolf
        }
1310 f214978a Kevin Wolf
1311 a35e1c17 Kevin Wolf
        /* TODO Preallocate data if requested */
1312 a35e1c17 Kevin Wolf
1313 a35e1c17 Kevin Wolf
        nb_sectors -= num;
1314 a35e1c17 Kevin Wolf
        offset += num << 9;
1315 a35e1c17 Kevin Wolf
    }
1316 a35e1c17 Kevin Wolf
1317 a35e1c17 Kevin Wolf
    /*
1318 a35e1c17 Kevin Wolf
     * It is expected that the image file is large enough to actually contain
1319 a35e1c17 Kevin Wolf
     * all of the allocated clusters (otherwise we get failing reads after
1320 a35e1c17 Kevin Wolf
     * EOF). Extend the image to the last allocated sector.
1321 a35e1c17 Kevin Wolf
     */
1322 060bee89 Kevin Wolf
    if (host_offset != 0) {
1323 ea80b906 Kevin Wolf
        uint8_t buf[512];
1324 ea80b906 Kevin Wolf
        memset(buf, 0, 512);
1325 060bee89 Kevin Wolf
        ret = bdrv_write(bs->file, (host_offset >> 9) + num - 1, buf, 1);
1326 19dbcbf7 Kevin Wolf
        if (ret < 0) {
1327 19dbcbf7 Kevin Wolf
            return ret;
1328 19dbcbf7 Kevin Wolf
        }
1329 a35e1c17 Kevin Wolf
    }
1330 a35e1c17 Kevin Wolf
1331 a35e1c17 Kevin Wolf
    return 0;
1332 a35e1c17 Kevin Wolf
}
1333 a35e1c17 Kevin Wolf
1334 7c80ab3f Jes Sorensen
static int qcow2_create2(const char *filename, int64_t total_size,
1335 7c80ab3f Jes Sorensen
                         const char *backing_file, const char *backing_format,
1336 7c80ab3f Jes Sorensen
                         int flags, size_t cluster_size, int prealloc,
1337 6744cbab Kevin Wolf
                         QEMUOptionParameter *options, int version)
1338 a9420734 Kevin Wolf
{
1339 9b2260cb Dong Xu Wang
    /* Calculate cluster_bits */
1340 a9420734 Kevin Wolf
    int cluster_bits;
1341 a9420734 Kevin Wolf
    cluster_bits = ffs(cluster_size) - 1;
1342 a9420734 Kevin Wolf
    if (cluster_bits < MIN_CLUSTER_BITS || cluster_bits > MAX_CLUSTER_BITS ||
1343 a9420734 Kevin Wolf
        (1 << cluster_bits) != cluster_size)
1344 a9420734 Kevin Wolf
    {
1345 a9420734 Kevin Wolf
        error_report(
1346 6daf194d Markus Armbruster
            "Cluster size must be a power of two between %d and %dk",
1347 a9420734 Kevin Wolf
            1 << MIN_CLUSTER_BITS, 1 << (MAX_CLUSTER_BITS - 10));
1348 a9420734 Kevin Wolf
        return -EINVAL;
1349 a9420734 Kevin Wolf
    }
1350 a9420734 Kevin Wolf
1351 a9420734 Kevin Wolf
    /*
1352 a9420734 Kevin Wolf
     * Open the image file and write a minimal qcow2 header.
1353 a9420734 Kevin Wolf
     *
1354 a9420734 Kevin Wolf
     * We keep things simple and start with a zero-sized image. We also
1355 a9420734 Kevin Wolf
     * do without refcount blocks or a L1 table for now. We'll fix the
1356 a9420734 Kevin Wolf
     * inconsistency later.
1357 a9420734 Kevin Wolf
     *
1358 a9420734 Kevin Wolf
     * We do need a refcount table because growing the refcount table means
1359 a9420734 Kevin Wolf
     * allocating two new refcount blocks - the second of which would be at
1360 a9420734 Kevin Wolf
     * 2 GB for 64k clusters, and we don't want to have a 2 GB initial file
1361 a9420734 Kevin Wolf
     * size for any qcow2 image.
1362 a9420734 Kevin Wolf
     */
1363 a9420734 Kevin Wolf
    BlockDriverState* bs;
1364 a9420734 Kevin Wolf
    QCowHeader header;
1365 a9420734 Kevin Wolf
    uint8_t* refcount_table;
1366 a9420734 Kevin Wolf
    int ret;
1367 a9420734 Kevin Wolf
1368 cc84d90f Max Reitz
    ret = bdrv_create_file(filename, options, NULL);
1369 a9420734 Kevin Wolf
    if (ret < 0) {
1370 a9420734 Kevin Wolf
        return ret;
1371 a9420734 Kevin Wolf
    }
1372 a9420734 Kevin Wolf
1373 34b5d2c6 Max Reitz
    ret = bdrv_file_open(&bs, filename, NULL, BDRV_O_RDWR, NULL);
1374 a9420734 Kevin Wolf
    if (ret < 0) {
1375 a9420734 Kevin Wolf
        return ret;
1376 a9420734 Kevin Wolf
    }
1377 a9420734 Kevin Wolf
1378 a9420734 Kevin Wolf
    /* Write the header */
1379 a9420734 Kevin Wolf
    memset(&header, 0, sizeof(header));
1380 a9420734 Kevin Wolf
    header.magic = cpu_to_be32(QCOW_MAGIC);
1381 6744cbab Kevin Wolf
    header.version = cpu_to_be32(version);
1382 a9420734 Kevin Wolf
    header.cluster_bits = cpu_to_be32(cluster_bits);
1383 a9420734 Kevin Wolf
    header.size = cpu_to_be64(0);
1384 a9420734 Kevin Wolf
    header.l1_table_offset = cpu_to_be64(0);
1385 a9420734 Kevin Wolf
    header.l1_size = cpu_to_be32(0);
1386 a9420734 Kevin Wolf
    header.refcount_table_offset = cpu_to_be64(cluster_size);
1387 a9420734 Kevin Wolf
    header.refcount_table_clusters = cpu_to_be32(1);
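    /* refcount_order is the log2 of the refcount width in bits; qemu only
     * supports 16-bit refcounts, i.e. order 4 */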
1388 6744cbab Kevin Wolf
    header.refcount_order = cpu_to_be32(3 + REFCOUNT_SHIFT);
1389 6744cbab Kevin Wolf
    header.header_length = cpu_to_be32(sizeof(header));
1390 a9420734 Kevin Wolf
1391 a9420734 Kevin Wolf
    if (flags & BLOCK_FLAG_ENCRYPT) {
1392 a9420734 Kevin Wolf
        header.crypt_method = cpu_to_be32(QCOW_CRYPT_AES);
1393 a9420734 Kevin Wolf
    } else {
1394 a9420734 Kevin Wolf
        header.crypt_method = cpu_to_be32(QCOW_CRYPT_NONE);
1395 a9420734 Kevin Wolf
    }
1396 a9420734 Kevin Wolf
1397 bfe8043e Stefan Hajnoczi
    if (flags & BLOCK_FLAG_LAZY_REFCOUNTS) {
1398 bfe8043e Stefan Hajnoczi
        header.compatible_features |=
1399 bfe8043e Stefan Hajnoczi
            cpu_to_be64(QCOW2_COMPAT_LAZY_REFCOUNTS);
1400 bfe8043e Stefan Hajnoczi
    }
1401 bfe8043e Stefan Hajnoczi
1402 a9420734 Kevin Wolf
    ret = bdrv_pwrite(bs, 0, &header, sizeof(header));
1403 a9420734 Kevin Wolf
    if (ret < 0) {
1404 a9420734 Kevin Wolf
        goto out;
1405 a9420734 Kevin Wolf
    }
1406 a9420734 Kevin Wolf
1407 a9420734 Kevin Wolf
    /* Write an empty refcount table */
1408 7267c094 Anthony Liguori
    refcount_table = g_malloc0(cluster_size);
1409 a9420734 Kevin Wolf
    ret = bdrv_pwrite(bs, cluster_size, refcount_table, cluster_size);
1410 7267c094 Anthony Liguori
    g_free(refcount_table);
1411 a9420734 Kevin Wolf
1412 a9420734 Kevin Wolf
    if (ret < 0) {
1413 a9420734 Kevin Wolf
        goto out;
1414 a9420734 Kevin Wolf
    }
1415 a9420734 Kevin Wolf
1416 a9420734 Kevin Wolf
    bdrv_close(bs);
1417 a9420734 Kevin Wolf
1418 a9420734 Kevin Wolf
    /*
1419 a9420734 Kevin Wolf
     * And now open the image and make it consistent first (i.e. increase the
1420 a9420734 Kevin Wolf
     * refcount of the cluster that is occupied by the header and the refcount
1421 a9420734 Kevin Wolf
     * table)
1422 a9420734 Kevin Wolf
     */
1423 a9420734 Kevin Wolf
    BlockDriver* drv = bdrv_find_format("qcow2");
1424 a9420734 Kevin Wolf
    assert(drv != NULL);
1425 de9c0cec Kevin Wolf
    ret = bdrv_open(bs, filename, NULL,
1426 34b5d2c6 Max Reitz
        BDRV_O_RDWR | BDRV_O_CACHE_WB | BDRV_O_NO_FLUSH, drv, NULL);
1427 a9420734 Kevin Wolf
    if (ret < 0) {
1428 a9420734 Kevin Wolf
        goto out;
1429 a9420734 Kevin Wolf
    }
1430 a9420734 Kevin Wolf
1431 a9420734 Kevin Wolf
    ret = qcow2_alloc_clusters(bs, 2 * cluster_size);
1432 a9420734 Kevin Wolf
    if (ret < 0) {
1433 a9420734 Kevin Wolf
        goto out;
1434 a9420734 Kevin Wolf
1435 a9420734 Kevin Wolf
    } else if (ret != 0) {
1436 a9420734 Kevin Wolf
        error_report("Huh, first cluster in empty image is already in use?");
1437 a9420734 Kevin Wolf
        abort();
1438 a9420734 Kevin Wolf
    }
1439 a9420734 Kevin Wolf
1440 a9420734 Kevin Wolf
    /* Okay, now that we have a valid image, let's give it the right size */
1441 a9420734 Kevin Wolf
    ret = bdrv_truncate(bs, total_size * BDRV_SECTOR_SIZE);
1442 a9420734 Kevin Wolf
    if (ret < 0) {
1443 a9420734 Kevin Wolf
        goto out;
1444 a9420734 Kevin Wolf
    }
1445 a9420734 Kevin Wolf
1446 a9420734 Kevin Wolf
    /* Want a backing file? There you go. */
1447 a9420734 Kevin Wolf
    if (backing_file) {
1448 a9420734 Kevin Wolf
        ret = bdrv_change_backing_file(bs, backing_file, backing_format);
1449 a9420734 Kevin Wolf
        if (ret < 0) {
1450 a9420734 Kevin Wolf
            goto out;
1451 a9420734 Kevin Wolf
        }
1452 a9420734 Kevin Wolf
    }
1453 a9420734 Kevin Wolf
1454 a9420734 Kevin Wolf
    /* And if we're supposed to preallocate metadata, do that now */
1455 a9420734 Kevin Wolf
    if (prealloc) {
1456 15552c4a Zhi Yong Wu
        BDRVQcowState *s = bs->opaque;
1457 15552c4a Zhi Yong Wu
        qemu_co_mutex_lock(&s->lock);
1458 a9420734 Kevin Wolf
        ret = preallocate(bs);
1459 15552c4a Zhi Yong Wu
        qemu_co_mutex_unlock(&s->lock);
1460 a9420734 Kevin Wolf
        if (ret < 0) {
1461 a9420734 Kevin Wolf
            goto out;
1462 a9420734 Kevin Wolf
        }
1463 a9420734 Kevin Wolf
    }
1464 a9420734 Kevin Wolf
1465 a9420734 Kevin Wolf
    ret = 0;
1466 a9420734 Kevin Wolf
out:
1467 4f6fd349 Fam Zheng
    bdrv_unref(bs);
1468 a9420734 Kevin Wolf
    return ret;
1469 a9420734 Kevin Wolf
}
1470 de5f3f40 Kevin Wolf
1471 d5124c00 Max Reitz
static int qcow2_create(const char *filename, QEMUOptionParameter *options,
1472 d5124c00 Max Reitz
                        Error **errp)
1473 de5f3f40 Kevin Wolf
{
1474 de5f3f40 Kevin Wolf
    const char *backing_file = NULL;
1475 de5f3f40 Kevin Wolf
    const char *backing_fmt = NULL;
1476 de5f3f40 Kevin Wolf
    uint64_t sectors = 0;
1477 de5f3f40 Kevin Wolf
    int flags = 0;
1478 99cce9fa Kevin Wolf
    size_t cluster_size = DEFAULT_CLUSTER_SIZE;
1479 de5f3f40 Kevin Wolf
    int prealloc = 0;
1480 8ad1898c Kevin Wolf
    int version = 3;
1481 de5f3f40 Kevin Wolf
1482 de5f3f40 Kevin Wolf
    /* Read out options */
1483 de5f3f40 Kevin Wolf
    while (options && options->name) {
1484 de5f3f40 Kevin Wolf
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
1485 de5f3f40 Kevin Wolf
            sectors = options->value.n / 512;
1486 de5f3f40 Kevin Wolf
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FILE)) {
1487 de5f3f40 Kevin Wolf
            backing_file = options->value.s;
1488 de5f3f40 Kevin Wolf
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FMT)) {
1489 de5f3f40 Kevin Wolf
            backing_fmt = options->value.s;
1490 de5f3f40 Kevin Wolf
        } else if (!strcmp(options->name, BLOCK_OPT_ENCRYPT)) {
1491 de5f3f40 Kevin Wolf
            flags |= options->value.n ? BLOCK_FLAG_ENCRYPT : 0;
1492 de5f3f40 Kevin Wolf
        } else if (!strcmp(options->name, BLOCK_OPT_CLUSTER_SIZE)) {
1493 de5f3f40 Kevin Wolf
            if (options->value.n) {
1494 de5f3f40 Kevin Wolf
                cluster_size = options->value.n;
1495 de5f3f40 Kevin Wolf
            }
1496 de5f3f40 Kevin Wolf
        } else if (!strcmp(options->name, BLOCK_OPT_PREALLOC)) {
1497 de5f3f40 Kevin Wolf
            if (!options->value.s || !strcmp(options->value.s, "off")) {
1498 de5f3f40 Kevin Wolf
                prealloc = 0;
1499 de5f3f40 Kevin Wolf
            } else if (!strcmp(options->value.s, "metadata")) {
1500 de5f3f40 Kevin Wolf
                prealloc = 1;
1501 de5f3f40 Kevin Wolf
            } else {
1502 de5f3f40 Kevin Wolf
                fprintf(stderr, "Invalid preallocation mode: '%s'\n",
1503 de5f3f40 Kevin Wolf
                    options->value.s);
1504 de5f3f40 Kevin Wolf
                return -EINVAL;
1505 de5f3f40 Kevin Wolf
            }
1506 6744cbab Kevin Wolf
        } else if (!strcmp(options->name, BLOCK_OPT_COMPAT_LEVEL)) {
1507 9117b477 Kevin Wolf
            if (!options->value.s) {
1508 9117b477 Kevin Wolf
                /* keep the default */
1509 9117b477 Kevin Wolf
            } else if (!strcmp(options->value.s, "0.10")) {
1510 6744cbab Kevin Wolf
                version = 2;
1511 6744cbab Kevin Wolf
            } else if (!strcmp(options->value.s, "1.1")) {
1512 6744cbab Kevin Wolf
                version = 3;
1513 6744cbab Kevin Wolf
            } else {
1514 6744cbab Kevin Wolf
                fprintf(stderr, "Invalid compatibility level: '%s'\n",
1515 6744cbab Kevin Wolf
                    options->value.s);
1516 6744cbab Kevin Wolf
                return -EINVAL;
1517 6744cbab Kevin Wolf
            }
1518 bfe8043e Stefan Hajnoczi
        } else if (!strcmp(options->name, BLOCK_OPT_LAZY_REFCOUNTS)) {
1519 bfe8043e Stefan Hajnoczi
            flags |= options->value.n ? BLOCK_FLAG_LAZY_REFCOUNTS : 0;
1520 de5f3f40 Kevin Wolf
        }
1521 de5f3f40 Kevin Wolf
        options++;
1522 de5f3f40 Kevin Wolf
    }
1523 de5f3f40 Kevin Wolf
1524 de5f3f40 Kevin Wolf
    if (backing_file && prealloc) {
1525 de5f3f40 Kevin Wolf
        fprintf(stderr, "Backing file and preallocation cannot be used at "
1526 de5f3f40 Kevin Wolf
            "the same time\n");
1527 de5f3f40 Kevin Wolf
        return -EINVAL;
1528 de5f3f40 Kevin Wolf
    }
1529 de5f3f40 Kevin Wolf
1530 bfe8043e Stefan Hajnoczi
    if (version < 3 && (flags & BLOCK_FLAG_LAZY_REFCOUNTS)) {
1531 bfe8043e Stefan Hajnoczi
        fprintf(stderr, "Lazy refcounts only supported with compatibility "
1532 bfe8043e Stefan Hajnoczi
                "level 1.1 and above (use compat=1.1 or greater)\n");
1533 bfe8043e Stefan Hajnoczi
        return -EINVAL;
1534 bfe8043e Stefan Hajnoczi
    }
1535 bfe8043e Stefan Hajnoczi
1536 7c80ab3f Jes Sorensen
    return qcow2_create2(filename, sectors, backing_file, backing_fmt, flags,
1537 6744cbab Kevin Wolf
                         cluster_size, prealloc, options, version);
1538 de5f3f40 Kevin Wolf
}
1539 de5f3f40 Kevin Wolf
1540 7c80ab3f Jes Sorensen
static int qcow2_make_empty(BlockDriverState *bs)
1541 20d97356 Blue Swirl
{
1542 20d97356 Blue Swirl
#if 0
1543 20d97356 Blue Swirl
    /* XXX: not correct */
1544 20d97356 Blue Swirl
    BDRVQcowState *s = bs->opaque;
1545 20d97356 Blue Swirl
    uint32_t l1_length = s->l1_size * sizeof(uint64_t);
1546 20d97356 Blue Swirl
    int ret;
1547 20d97356 Blue Swirl

1548 20d97356 Blue Swirl
    memset(s->l1_table, 0, l1_length);
1549 66f82cee Kevin Wolf
    if (bdrv_pwrite(bs->file, s->l1_table_offset, s->l1_table, l1_length) < 0)
1550 20d97356 Blue Swirl
        return -1;
1551 66f82cee Kevin Wolf
    ret = bdrv_truncate(bs->file, s->l1_table_offset + l1_length);
1552 20d97356 Blue Swirl
    if (ret < 0)
1553 20d97356 Blue Swirl
        return ret;
1554 20d97356 Blue Swirl

1555 20d97356 Blue Swirl
    l2_cache_reset(bs);
1556 20d97356 Blue Swirl
#endif
1557 20d97356 Blue Swirl
    return 0;
1558 20d97356 Blue Swirl
}
1559 20d97356 Blue Swirl
1560 621f0589 Kevin Wolf
static coroutine_fn int qcow2_co_write_zeroes(BlockDriverState *bs,
1561 621f0589 Kevin Wolf
    int64_t sector_num, int nb_sectors)
1562 621f0589 Kevin Wolf
{
1563 621f0589 Kevin Wolf
    int ret;
1564 621f0589 Kevin Wolf
    BDRVQcowState *s = bs->opaque;
1565 621f0589 Kevin Wolf
1566 621f0589 Kevin Wolf
    /* Emulate misaligned zero writes */
1567 621f0589 Kevin Wolf
    if (sector_num % s->cluster_sectors || nb_sectors % s->cluster_sectors) {
1568 621f0589 Kevin Wolf
        return -ENOTSUP;
1569 621f0589 Kevin Wolf
    }
1570 621f0589 Kevin Wolf
1571 621f0589 Kevin Wolf
    /* Whatever is left can use real zero clusters */
1572 621f0589 Kevin Wolf
    qemu_co_mutex_lock(&s->lock);
1573 621f0589 Kevin Wolf
    ret = qcow2_zero_clusters(bs, sector_num << BDRV_SECTOR_BITS,
1574 621f0589 Kevin Wolf
        nb_sectors);
1575 621f0589 Kevin Wolf
    qemu_co_mutex_unlock(&s->lock);
1576 621f0589 Kevin Wolf
1577 621f0589 Kevin Wolf
    return ret;
1578 621f0589 Kevin Wolf
}
1579 621f0589 Kevin Wolf
1580 6db39ae2 Paolo Bonzini
static coroutine_fn int qcow2_co_discard(BlockDriverState *bs,
1581 6db39ae2 Paolo Bonzini
    int64_t sector_num, int nb_sectors)
1582 5ea929e3 Kevin Wolf
{
1583 6db39ae2 Paolo Bonzini
    int ret;
1584 6db39ae2 Paolo Bonzini
    BDRVQcowState *s = bs->opaque;
1585 6db39ae2 Paolo Bonzini
1586 6db39ae2 Paolo Bonzini
    qemu_co_mutex_lock(&s->lock);
1587 6db39ae2 Paolo Bonzini
    ret = qcow2_discard_clusters(bs, sector_num << BDRV_SECTOR_BITS,
1588 670df5e3 Kevin Wolf
        nb_sectors, QCOW2_DISCARD_REQUEST);
1589 6db39ae2 Paolo Bonzini
    qemu_co_mutex_unlock(&s->lock);
1590 6db39ae2 Paolo Bonzini
    return ret;
1591 5ea929e3 Kevin Wolf
}
1592 5ea929e3 Kevin Wolf
1593 419b19d9 Stefan Hajnoczi
static int qcow2_truncate(BlockDriverState *bs, int64_t offset)
1594 419b19d9 Stefan Hajnoczi
{
1595 419b19d9 Stefan Hajnoczi
    BDRVQcowState *s = bs->opaque;
1596 2cf7cfa1 Kevin Wolf
    int64_t new_l1_size;
1597 2cf7cfa1 Kevin Wolf
    int ret;
1598 419b19d9 Stefan Hajnoczi
1599 419b19d9 Stefan Hajnoczi
    if (offset & 511) {
1600 259b2173 Kevin Wolf
        error_report("The new size must be a multiple of 512");
1601 419b19d9 Stefan Hajnoczi
        return -EINVAL;
1602 419b19d9 Stefan Hajnoczi
    }
1603 419b19d9 Stefan Hajnoczi
1604 419b19d9 Stefan Hajnoczi
    /* cannot proceed if image has snapshots */
1605 419b19d9 Stefan Hajnoczi
    if (s->nb_snapshots) {
1606 259b2173 Kevin Wolf
        error_report("Can't resize an image which has snapshots");
1607 419b19d9 Stefan Hajnoczi
        return -ENOTSUP;
1608 419b19d9 Stefan Hajnoczi
    }
1609 419b19d9 Stefan Hajnoczi
1610 419b19d9 Stefan Hajnoczi
    /* shrinking is currently not supported */
1611 419b19d9 Stefan Hajnoczi
    if (offset < bs->total_sectors * 512) {
1612 259b2173 Kevin Wolf
        error_report("qcow2 doesn't support shrinking images yet");
1613 419b19d9 Stefan Hajnoczi
        return -ENOTSUP;
1614 419b19d9 Stefan Hajnoczi
    }
1615 419b19d9 Stefan Hajnoczi
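    /* Growing the image only requires enlarging the L1 table; new clusters
     * are allocated lazily when they are first written */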
1616 419b19d9 Stefan Hajnoczi
    new_l1_size = size_to_l1(s, offset);
1617 72893756 Stefan Hajnoczi
    ret = qcow2_grow_l1_table(bs, new_l1_size, true);
1618 419b19d9 Stefan Hajnoczi
    if (ret < 0) {
1619 419b19d9 Stefan Hajnoczi
        return ret;
1620 419b19d9 Stefan Hajnoczi
    }
1621 419b19d9 Stefan Hajnoczi
1622 419b19d9 Stefan Hajnoczi
    /* write updated header.size */
1623 419b19d9 Stefan Hajnoczi
    offset = cpu_to_be64(offset);
1624 8b3b7206 Kevin Wolf
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, size),
1625 8b3b7206 Kevin Wolf
                           &offset, sizeof(uint64_t));
1626 419b19d9 Stefan Hajnoczi
    if (ret < 0) {
1627 419b19d9 Stefan Hajnoczi
        return ret;
1628 419b19d9 Stefan Hajnoczi
    }
1629 419b19d9 Stefan Hajnoczi
1630 419b19d9 Stefan Hajnoczi
    s->l1_vm_state_index = new_l1_size;
1631 419b19d9 Stefan Hajnoczi
    return 0;
1632 419b19d9 Stefan Hajnoczi
}
1633 419b19d9 Stefan Hajnoczi
1634 20d97356 Blue Swirl
/* XXX: put compressed sectors first, then all the cluster aligned
1635 20d97356 Blue Swirl
   tables to avoid losing bytes in alignment */
1636 7c80ab3f Jes Sorensen
static int qcow2_write_compressed(BlockDriverState *bs, int64_t sector_num,
1637 7c80ab3f Jes Sorensen
                                  const uint8_t *buf, int nb_sectors)
1638 20d97356 Blue Swirl
{
1639 20d97356 Blue Swirl
    BDRVQcowState *s = bs->opaque;
1640 20d97356 Blue Swirl
    z_stream strm;
1641 20d97356 Blue Swirl
    int ret, out_len;
1642 20d97356 Blue Swirl
    uint8_t *out_buf;
1643 20d97356 Blue Swirl
    uint64_t cluster_offset;
1644 20d97356 Blue Swirl
1645 20d97356 Blue Swirl
    if (nb_sectors == 0) {
1646 20d97356 Blue Swirl
        /* align end of file to a sector boundary to ease reading with
1647 20d97356 Blue Swirl
           sector based I/Os */
1648 66f82cee Kevin Wolf
        cluster_offset = bdrv_getlength(bs->file);
1649 20d97356 Blue Swirl
        cluster_offset = (cluster_offset + 511) & ~511;
1650 66f82cee Kevin Wolf
        bdrv_truncate(bs->file, cluster_offset);
1651 20d97356 Blue Swirl
        return 0;
1652 20d97356 Blue Swirl
    }
1653 20d97356 Blue Swirl
1654 f4d38bef Stefan Hajnoczi
    if (nb_sectors != s->cluster_sectors) {
1655 f4d38bef Stefan Hajnoczi
        ret = -EINVAL;
1656 f4d38bef Stefan Hajnoczi
1657 f4d38bef Stefan Hajnoczi
        /* Zero-pad last write if image size is not cluster aligned */
1658 f4d38bef Stefan Hajnoczi
        if (sector_num + nb_sectors == bs->total_sectors &&
1659 f4d38bef Stefan Hajnoczi
            nb_sectors < s->cluster_sectors) {
1660 f4d38bef Stefan Hajnoczi
            uint8_t *pad_buf = qemu_blockalign(bs, s->cluster_size);
1661 f4d38bef Stefan Hajnoczi
            memset(pad_buf, 0, s->cluster_size);
1662 f4d38bef Stefan Hajnoczi
            memcpy(pad_buf, buf, nb_sectors * BDRV_SECTOR_SIZE);
1663 f4d38bef Stefan Hajnoczi
            ret = qcow2_write_compressed(bs, sector_num,
1664 f4d38bef Stefan Hajnoczi
                                         pad_buf, s->cluster_sectors);
1665 f4d38bef Stefan Hajnoczi
            qemu_vfree(pad_buf);
1666 f4d38bef Stefan Hajnoczi
        }
1667 f4d38bef Stefan Hajnoczi
        return ret;
1668 f4d38bef Stefan Hajnoczi
    }
1669 20d97356 Blue Swirl
1670 7267c094 Anthony Liguori
    out_buf = g_malloc(s->cluster_size + (s->cluster_size / 1000) + 128);
1671 20d97356 Blue Swirl
1672 20d97356 Blue Swirl
    /* best compression, small window, no zlib header */
1673 20d97356 Blue Swirl
    memset(&strm, 0, sizeof(strm));
1674 20d97356 Blue Swirl
    ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION,
1675 20d97356 Blue Swirl
                       Z_DEFLATED, -12,
1676 20d97356 Blue Swirl
                       9, Z_DEFAULT_STRATEGY);
1677 20d97356 Blue Swirl
    if (ret != 0) {
1678 8f1efd00 Kevin Wolf
        ret = -EINVAL;
1679 8f1efd00 Kevin Wolf
        goto fail;
1680 20d97356 Blue Swirl
    }
1681 20d97356 Blue Swirl
1682 20d97356 Blue Swirl
    strm.avail_in = s->cluster_size;
1683 20d97356 Blue Swirl
    strm.next_in = (uint8_t *)buf;
1684 20d97356 Blue Swirl
    strm.avail_out = s->cluster_size;
1685 20d97356 Blue Swirl
    strm.next_out = out_buf;
1686 20d97356 Blue Swirl
1687 20d97356 Blue Swirl
    ret = deflate(&strm, Z_FINISH);
1688 20d97356 Blue Swirl
    if (ret != Z_STREAM_END && ret != Z_OK) {
1689 20d97356 Blue Swirl
        deflateEnd(&strm);
1690 8f1efd00 Kevin Wolf
        ret = -EINVAL;
1691 8f1efd00 Kevin Wolf
        goto fail;
1692 20d97356 Blue Swirl
    }
1693 20d97356 Blue Swirl
    out_len = strm.next_out - out_buf;
1694 20d97356 Blue Swirl
1695 20d97356 Blue Swirl
    deflateEnd(&strm);
1696 20d97356 Blue Swirl
1697 20d97356 Blue Swirl
    if (ret != Z_STREAM_END || out_len >= s->cluster_size) {
1698 20d97356 Blue Swirl
        /* could not compress: write normal cluster */
1699 cf93980e Max Reitz
1700 cf93980e Max Reitz
        ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_DEFAULT,
1701 cf93980e Max Reitz
                sector_num * BDRV_SECTOR_SIZE,
1702 cf93980e Max Reitz
                s->cluster_sectors * BDRV_SECTOR_SIZE);
1703 cf93980e Max Reitz
        if (ret < 0) {
1704 cf93980e Max Reitz
            goto fail;
1705 cf93980e Max Reitz
        }
1706 cf93980e Max Reitz
1707 8f1efd00 Kevin Wolf
        ret = bdrv_write(bs, sector_num, buf, s->cluster_sectors);
1708 8f1efd00 Kevin Wolf
        if (ret < 0) {
1709 8f1efd00 Kevin Wolf
            goto fail;
1710 8f1efd00 Kevin Wolf
        }
1711 20d97356 Blue Swirl
    } else {
1712 20d97356 Blue Swirl
        cluster_offset = qcow2_alloc_compressed_cluster_offset(bs,
1713 20d97356 Blue Swirl
            sector_num << 9, out_len);
1714 8f1efd00 Kevin Wolf
        if (!cluster_offset) {
1715 8f1efd00 Kevin Wolf
            ret = -EIO;
1716 8f1efd00 Kevin Wolf
            goto fail;
1717 8f1efd00 Kevin Wolf
        }
1718 20d97356 Blue Swirl
        cluster_offset &= s->cluster_offset_mask;
1719 cf93980e Max Reitz
1720 cf93980e Max Reitz
        ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_DEFAULT,
1721 cf93980e Max Reitz
                cluster_offset, out_len);
1722 cf93980e Max Reitz
        if (ret < 0) {
1723 cf93980e Max Reitz
            goto fail;
1724 cf93980e Max Reitz
        }
1725 cf93980e Max Reitz
1726 66f82cee Kevin Wolf
        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED);
1727 8f1efd00 Kevin Wolf
        ret = bdrv_pwrite(bs->file, cluster_offset, out_buf, out_len);
1728 8f1efd00 Kevin Wolf
        if (ret < 0) {
1729 8f1efd00 Kevin Wolf
            goto fail;
1730 20d97356 Blue Swirl
        }
1731 20d97356 Blue Swirl
    }
1732 20d97356 Blue Swirl
1733 8f1efd00 Kevin Wolf
    ret = 0;
1734 8f1efd00 Kevin Wolf
fail:
1735 7267c094 Anthony Liguori
    g_free(out_buf);
1736 8f1efd00 Kevin Wolf
    return ret;
1737 20d97356 Blue Swirl
}
1738 20d97356 Blue Swirl
1739 a968168c Dong Xu Wang
static coroutine_fn int qcow2_co_flush_to_os(BlockDriverState *bs)
1740 20d97356 Blue Swirl
{
1741 29c1a730 Kevin Wolf
    BDRVQcowState *s = bs->opaque;
1742 29c1a730 Kevin Wolf
    int ret;
1743 29c1a730 Kevin Wolf
1744 8b94ff85 Paolo Bonzini
    qemu_co_mutex_lock(&s->lock);
1745 29c1a730 Kevin Wolf
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
1746 29c1a730 Kevin Wolf
    if (ret < 0) {
1747 c95de7e2 Dong Xu Wang
        qemu_co_mutex_unlock(&s->lock);
1748 8b94ff85 Paolo Bonzini
        return ret;
1749 29c1a730 Kevin Wolf
    }
1750 29c1a730 Kevin Wolf
1751 bfe8043e Stefan Hajnoczi
    if (qcow2_need_accurate_refcounts(s)) {
1752 bfe8043e Stefan Hajnoczi
        ret = qcow2_cache_flush(bs, s->refcount_block_cache);
1753 bfe8043e Stefan Hajnoczi
        if (ret < 0) {
1754 bfe8043e Stefan Hajnoczi
            qemu_co_mutex_unlock(&s->lock);
1755 bfe8043e Stefan Hajnoczi
            return ret;
1756 bfe8043e Stefan Hajnoczi
        }
1757 29c1a730 Kevin Wolf
    }
1758 8b94ff85 Paolo Bonzini
    qemu_co_mutex_unlock(&s->lock);
1759 29c1a730 Kevin Wolf
1760 eb489bb1 Kevin Wolf
    return 0;
1761 eb489bb1 Kevin Wolf
}
1762 eb489bb1 Kevin Wolf
1763 7c80ab3f Jes Sorensen
static int qcow2_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
1764 20d97356 Blue Swirl
{
1765 20d97356 Blue Swirl
    BDRVQcowState *s = bs->opaque;
1766 20d97356 Blue Swirl
    bdi->cluster_size = s->cluster_size;
1767 7c80ab3f Jes Sorensen
    bdi->vm_state_offset = qcow2_vm_state_offset(s);
1768 20d97356 Blue Swirl
    return 0;
1769 20d97356 Blue Swirl
}
1770 20d97356 Blue Swirl
1771 20d97356 Blue Swirl
#if 0
1772 20d97356 Blue Swirl
static void dump_refcounts(BlockDriverState *bs)
1773 20d97356 Blue Swirl
{
1774 20d97356 Blue Swirl
    BDRVQcowState *s = bs->opaque;
1775 20d97356 Blue Swirl
    int64_t nb_clusters, k, k1, size;
1776 20d97356 Blue Swirl
    int refcount;
1777 20d97356 Blue Swirl

1778 66f82cee Kevin Wolf
    size = bdrv_getlength(bs->file);
1779 20d97356 Blue Swirl
    nb_clusters = size_to_clusters(s, size);
1780 20d97356 Blue Swirl
    for(k = 0; k < nb_clusters;) {
1781 20d97356 Blue Swirl
        k1 = k;
1782 20d97356 Blue Swirl
        refcount = get_refcount(bs, k);
1783 20d97356 Blue Swirl
        k++;
1784 20d97356 Blue Swirl
        while (k < nb_clusters && get_refcount(bs, k) == refcount)
1785 20d97356 Blue Swirl
            k++;
1786 0bfcd599 Blue Swirl
        printf("%" PRId64 ": refcount=%d nb=%" PRId64 "\n", k, refcount,
1787 0bfcd599 Blue Swirl
               k - k1);
1788 20d97356 Blue Swirl
    }
1789 20d97356 Blue Swirl
}
1790 20d97356 Blue Swirl
#endif
1791 20d97356 Blue Swirl
1792 cf8074b3 Kevin Wolf
static int qcow2_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov,
1793 cf8074b3 Kevin Wolf
                              int64_t pos)
1794 20d97356 Blue Swirl
{
1795 20d97356 Blue Swirl
    BDRVQcowState *s = bs->opaque;
1796 20d97356 Blue Swirl
    int growable = bs->growable;
1797 20d97356 Blue Swirl
    int ret;
1798 20d97356 Blue Swirl
1799 66f82cee Kevin Wolf
    BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE);
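    /* The VM state is stored past the end of the virtual disk, so the image
     * has to be marked growable temporarily for this write to be allowed */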
1800 20d97356 Blue Swirl
    bs->growable = 1;
1801 8d3b1a2d Kevin Wolf
    ret = bdrv_pwritev(bs, qcow2_vm_state_offset(s) + pos, qiov);
1802 20d97356 Blue Swirl
    bs->growable = growable;
1803 20d97356 Blue Swirl
1804 20d97356 Blue Swirl
    return ret;
1805 20d97356 Blue Swirl
}
1806 20d97356 Blue Swirl
1807 7c80ab3f Jes Sorensen
static int qcow2_load_vmstate(BlockDriverState *bs, uint8_t *buf,
1808 7c80ab3f Jes Sorensen
                              int64_t pos, int size)
1809 20d97356 Blue Swirl
{
1810 20d97356 Blue Swirl
    BDRVQcowState *s = bs->opaque;
1811 20d97356 Blue Swirl
    int growable = bs->growable;
1812 0d51b4de Asias He
    bool zero_beyond_eof = bs->zero_beyond_eof;
1813 20d97356 Blue Swirl
    int ret;
1814 20d97356 Blue Swirl
1815 66f82cee Kevin Wolf
    BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD);
1816 20d97356 Blue Swirl
    bs->growable = 1;
1817 0d51b4de Asias He
    bs->zero_beyond_eof = false;
1818 7c80ab3f Jes Sorensen
    ret = bdrv_pread(bs, qcow2_vm_state_offset(s) + pos, buf, size);
1819 20d97356 Blue Swirl
    bs->growable = growable;
1820 0d51b4de Asias He
    bs->zero_beyond_eof = zero_beyond_eof;
1821 20d97356 Blue Swirl
1822 20d97356 Blue Swirl
    return ret;
1823 20d97356 Blue Swirl
}
1824 20d97356 Blue Swirl
1825 9296b3ed Max Reitz
/*
1826 9296b3ed Max Reitz
 * Downgrades an image's version. To achieve this, any incompatible features
1827 9296b3ed Max Reitz
 * have to be removed.
1828 9296b3ed Max Reitz
 */
1829 9296b3ed Max Reitz
static int qcow2_downgrade(BlockDriverState *bs, int target_version)
1830 9296b3ed Max Reitz
{
1831 9296b3ed Max Reitz
    BDRVQcowState *s = bs->opaque;
1832 9296b3ed Max Reitz
    int current_version = s->qcow_version;
1833 9296b3ed Max Reitz
    int ret;
1834 9296b3ed Max Reitz
1835 9296b3ed Max Reitz
    if (target_version == current_version) {
1836 9296b3ed Max Reitz
        return 0;
1837 9296b3ed Max Reitz
    } else if (target_version > current_version) {
1838 9296b3ed Max Reitz
        return -EINVAL;
1839 9296b3ed Max Reitz
    } else if (target_version != 2) {
1840 9296b3ed Max Reitz
        return -EINVAL;
1841 9296b3ed Max Reitz
    }
1842 9296b3ed Max Reitz
1843 9296b3ed Max Reitz
    if (s->refcount_order != 4) {
1844 9296b3ed Max Reitz
        /* we would have to convert the image to a refcount_order == 4 image
1845 9296b3ed Max Reitz
         * here; however, since qemu (at the time of writing this) does not
1846 9296b3ed Max Reitz
         * support anything different than 4 anyway, there is no point in doing
1847 9296b3ed Max Reitz
         * so right now; however, we should error out (if qemu supports this in
1848 9296b3ed Max Reitz
         * the future and this code has not been adapted) */
1849 9296b3ed Max Reitz
        error_report("qcow2_downgrade: Image refcount orders other than 4 are"
1850 9296b3ed Max Reitz
                     "currently not supported.");
1851 9296b3ed Max Reitz
        return -ENOTSUP;
1852 9296b3ed Max Reitz
    }
1853 9296b3ed Max Reitz
1854 9296b3ed Max Reitz
    /* clear incompatible features */
1855 9296b3ed Max Reitz
    if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
1856 9296b3ed Max Reitz
        ret = qcow2_mark_clean(bs);
1857 9296b3ed Max Reitz
        if (ret < 0) {
1858 9296b3ed Max Reitz
            return ret;
1859 9296b3ed Max Reitz
        }
1860 9296b3ed Max Reitz
    }
1861 9296b3ed Max Reitz
1862 9296b3ed Max Reitz
    /* with QCOW2_INCOMPAT_CORRUPT, it is pretty much impossible to get here in
1863 9296b3ed Max Reitz
     * the first place; if that happens nonetheless, returning -ENOTSUP is the
1864 9296b3ed Max Reitz
     * best thing to do anyway */
1865 9296b3ed Max Reitz
1866 9296b3ed Max Reitz
    if (s->incompatible_features) {
1867 9296b3ed Max Reitz
        return -ENOTSUP;
1868 9296b3ed Max Reitz
    }
1869 9296b3ed Max Reitz
1870 9296b3ed Max Reitz
    /* since we can ignore compatible features, we can set them to 0 as well */
1871 9296b3ed Max Reitz
    s->compatible_features = 0;
1872 9296b3ed Max Reitz
    /* if lazy refcounts have been used, they have already been fixed through
1873 9296b3ed Max Reitz
     * clearing the dirty flag */
1874 9296b3ed Max Reitz
1875 9296b3ed Max Reitz
    /* clearing autoclear features is trivial */
1876 9296b3ed Max Reitz
    s->autoclear_features = 0;
1877 9296b3ed Max Reitz
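    /* Zero clusters are a v3 feature, so expand them before downgrading the
     * image version */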
1878 9296b3ed Max Reitz
    ret = qcow2_expand_zero_clusters(bs);
1879 9296b3ed Max Reitz
    if (ret < 0) {
1880 9296b3ed Max Reitz
        return ret;
1881 9296b3ed Max Reitz
    }
1882 9296b3ed Max Reitz
1883 9296b3ed Max Reitz
    s->qcow_version = target_version;
1884 9296b3ed Max Reitz
    ret = qcow2_update_header(bs);
1885 9296b3ed Max Reitz
    if (ret < 0) {
1886 9296b3ed Max Reitz
        s->qcow_version = current_version;
1887 9296b3ed Max Reitz
        return ret;
1888 9296b3ed Max Reitz
    }
1889 9296b3ed Max Reitz
    return 0;
1890 9296b3ed Max Reitz
}
1891 9296b3ed Max Reitz
1892 9296b3ed Max Reitz
static int qcow2_amend_options(BlockDriverState *bs,
1893 9296b3ed Max Reitz
                               QEMUOptionParameter *options)
1894 9296b3ed Max Reitz
{
1895 9296b3ed Max Reitz
    BDRVQcowState *s = bs->opaque;
1896 9296b3ed Max Reitz
    int old_version = s->qcow_version, new_version = old_version;
1897 9296b3ed Max Reitz
    uint64_t new_size = 0;
1898 9296b3ed Max Reitz
    const char *backing_file = NULL, *backing_format = NULL;
1899 9296b3ed Max Reitz
    bool lazy_refcounts = s->use_lazy_refcounts;
1900 9296b3ed Max Reitz
    int ret;
1901 9296b3ed Max Reitz
    int i;
1902 9296b3ed Max Reitz
1903 9296b3ed Max Reitz
    for (i = 0; options[i].name; i++)
1904 9296b3ed Max Reitz
    {
1905 9296b3ed Max Reitz
        if (!options[i].assigned) {
1906 9296b3ed Max Reitz
            /* only change explicitly defined options */
1907 9296b3ed Max Reitz
            continue;
1908 9296b3ed Max Reitz
        }
1909 9296b3ed Max Reitz
1910 9296b3ed Max Reitz
        if (!strcmp(options[i].name, "compat")) {
1911 9296b3ed Max Reitz
            if (!options[i].value.s) {
1912 9296b3ed Max Reitz
                /* preserve default */
1913 9296b3ed Max Reitz
            } else if (!strcmp(options[i].value.s, "0.10")) {
1914 9296b3ed Max Reitz
                new_version = 2;
1915 9296b3ed Max Reitz
            } else if (!strcmp(options[i].value.s, "1.1")) {
1916 9296b3ed Max Reitz
                new_version = 3;
1917 9296b3ed Max Reitz
            } else {
1918 9296b3ed Max Reitz
                fprintf(stderr, "Unknown compatibility level %s.\n",
1919 9296b3ed Max Reitz
                        options[i].value.s);
1920 9296b3ed Max Reitz
                return -EINVAL;
1921 9296b3ed Max Reitz
            }
1922 9296b3ed Max Reitz
        } else if (!strcmp(options[i].name, "preallocation")) {
1923 9296b3ed Max Reitz
            fprintf(stderr, "Cannot change preallocation mode.\n");
1924 9296b3ed Max Reitz
            return -ENOTSUP;
1925 9296b3ed Max Reitz
        } else if (!strcmp(options[i].name, "size")) {
1926 9296b3ed Max Reitz
            new_size = options[i].value.n;
1927 9296b3ed Max Reitz
        } else if (!strcmp(options[i].name, "backing_file")) {
1928 9296b3ed Max Reitz
            backing_file = options[i].value.s;
1929 9296b3ed Max Reitz
        } else if (!strcmp(options[i].name, "backing_fmt")) {
1930 9296b3ed Max Reitz
            backing_format = options[i].value.s;
1931 9296b3ed Max Reitz
        } else if (!strcmp(options[i].name, "encryption")) {
1932 9296b3ed Max Reitz
            if ((options[i].value.n != !!s->crypt_method)) {
1933 9296b3ed Max Reitz
                fprintf(stderr, "Changing the encryption flag is not "
1934 9296b3ed Max Reitz
                        "supported.\n");
1935 9296b3ed Max Reitz
                return -ENOTSUP;
1936 9296b3ed Max Reitz
            }
1937 9296b3ed Max Reitz
        } else if (!strcmp(options[i].name, "cluster_size")) {
1938 9296b3ed Max Reitz
            if (options[i].value.n != s->cluster_size) {
1939 9296b3ed Max Reitz
                fprintf(stderr, "Changing the cluster size is not "
1940 9296b3ed Max Reitz
                        "supported.\n");
1941 9296b3ed Max Reitz
                return -ENOTSUP;
1942 9296b3ed Max Reitz
            }
1943 9296b3ed Max Reitz
        } else if (!strcmp(options[i].name, "lazy_refcounts")) {
1944 9296b3ed Max Reitz
            lazy_refcounts = options[i].value.n;
1945 9296b3ed Max Reitz
        } else {
1946 9296b3ed Max Reitz
            /* if this assertion fails, this probably means a new option was
1947 9296b3ed Max Reitz
             * added without having it covered here */
1948 9296b3ed Max Reitz
            assert(false);
1949 9296b3ed Max Reitz
        }
1950 9296b3ed Max Reitz
    }
1951 9296b3ed Max Reitz
1952 9296b3ed Max Reitz
    if (new_version != old_version) {
1953 9296b3ed Max Reitz
        if (new_version > old_version) {
1954 9296b3ed Max Reitz
            /* Upgrade */
1955 9296b3ed Max Reitz
            s->qcow_version = new_version;
1956 9296b3ed Max Reitz
            ret = qcow2_update_header(bs);
1957 9296b3ed Max Reitz
            if (ret < 0) {
1958 9296b3ed Max Reitz
                s->qcow_version = old_version;
1959 9296b3ed Max Reitz
                return ret;
1960 9296b3ed Max Reitz
            }
1961 9296b3ed Max Reitz
        } else {
1962 9296b3ed Max Reitz
            ret = qcow2_downgrade(bs, new_version);
1963 9296b3ed Max Reitz
            if (ret < 0) {
1964 9296b3ed Max Reitz
                return ret;
1965 9296b3ed Max Reitz
            }
1966 9296b3ed Max Reitz
        }
1967 9296b3ed Max Reitz
    }
1968 9296b3ed Max Reitz
1969 9296b3ed Max Reitz
    if (backing_file || backing_format) {
1970 9296b3ed Max Reitz
        ret = qcow2_change_backing_file(bs, backing_file ?: bs->backing_file,
1971 9296b3ed Max Reitz
                                        backing_format ?: bs->backing_format);
1972 9296b3ed Max Reitz
        if (ret < 0) {
1973 9296b3ed Max Reitz
            return ret;
1974 9296b3ed Max Reitz
        }
1975 9296b3ed Max Reitz
    }
1976 9296b3ed Max Reitz
1977 9296b3ed Max Reitz
    if (s->use_lazy_refcounts != lazy_refcounts) {
1978 9296b3ed Max Reitz
        if (lazy_refcounts) {
1979 9296b3ed Max Reitz
            if (s->qcow_version < 3) {
1980 9296b3ed Max Reitz
                fprintf(stderr, "Lazy refcounts only supported with compatibility "
1981 9296b3ed Max Reitz
                        "level 1.1 and above (use compat=1.1 or greater)\n");
1982 9296b3ed Max Reitz
                return -EINVAL;
1983 9296b3ed Max Reitz
            }
1984 9296b3ed Max Reitz
            s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS;
1985 9296b3ed Max Reitz
            ret = qcow2_update_header(bs);
1986 9296b3ed Max Reitz
            if (ret < 0) {
1987 9296b3ed Max Reitz
                s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS;
1988 9296b3ed Max Reitz
                return ret;
1989 9296b3ed Max Reitz
            }
1990 9296b3ed Max Reitz
            s->use_lazy_refcounts = true;
1991 9296b3ed Max Reitz
        } else {
1992 9296b3ed Max Reitz
            /* make image clean first */
1993 9296b3ed Max Reitz
            ret = qcow2_mark_clean(bs);
1994 9296b3ed Max Reitz
            if (ret < 0) {
1995 9296b3ed Max Reitz
                return ret;
1996 9296b3ed Max Reitz
            }
1997 9296b3ed Max Reitz
            /* now disallow lazy refcounts */
1998 9296b3ed Max Reitz
            s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS;
1999 9296b3ed Max Reitz
            ret = qcow2_update_header(bs);
2000 9296b3ed Max Reitz
            if (ret < 0) {
2001 9296b3ed Max Reitz
                s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS;
2002 9296b3ed Max Reitz
                return ret;
2003 9296b3ed Max Reitz
            }
2004 9296b3ed Max Reitz
            s->use_lazy_refcounts = false;
2005 9296b3ed Max Reitz
        }
2006 9296b3ed Max Reitz
    }
2007 9296b3ed Max Reitz
2008 9296b3ed Max Reitz
    if (new_size) {
2009 9296b3ed Max Reitz
        ret = bdrv_truncate(bs, new_size);
2010 9296b3ed Max Reitz
        if (ret < 0) {
2011 9296b3ed Max Reitz
            return ret;
2012 9296b3ed Max Reitz
        }
2013 9296b3ed Max Reitz
    }
2014 9296b3ed Max Reitz
2015 9296b3ed Max Reitz
    return 0;
2016 9296b3ed Max Reitz
}
2017 9296b3ed Max Reitz
2018 7c80ab3f Jes Sorensen
static QEMUOptionParameter qcow2_create_options[] = {
2019 20d97356 Blue Swirl
    {
2020 20d97356 Blue Swirl
        .name = BLOCK_OPT_SIZE,
2021 20d97356 Blue Swirl
        .type = OPT_SIZE,
2022 20d97356 Blue Swirl
        .help = "Virtual disk size"
2023 20d97356 Blue Swirl
    },
2024 20d97356 Blue Swirl
    {
2025 6744cbab Kevin Wolf
        .name = BLOCK_OPT_COMPAT_LEVEL,
2026 6744cbab Kevin Wolf
        .type = OPT_STRING,
2027 6744cbab Kevin Wolf
        .help = "Compatibility level (0.10 or 1.1)"
2028 6744cbab Kevin Wolf
    },
2029 6744cbab Kevin Wolf
    {
2030 20d97356 Blue Swirl
        .name = BLOCK_OPT_BACKING_FILE,
2031 20d97356 Blue Swirl
        .type = OPT_STRING,
2032 20d97356 Blue Swirl
        .help = "File name of a base image"
2033 20d97356 Blue Swirl
    },
2034 20d97356 Blue Swirl
    {
2035 20d97356 Blue Swirl
        .name = BLOCK_OPT_BACKING_FMT,
2036 20d97356 Blue Swirl
        .type = OPT_STRING,
2037 20d97356 Blue Swirl
        .help = "Image format of the base image"
2038 20d97356 Blue Swirl
    },
2039 20d97356 Blue Swirl
    {
2040 20d97356 Blue Swirl
        .name = BLOCK_OPT_ENCRYPT,
2041 20d97356 Blue Swirl
        .type = OPT_FLAG,
2042 20d97356 Blue Swirl
        .help = "Encrypt the image"
2043 20d97356 Blue Swirl
    },
2044 20d97356 Blue Swirl
    {
2045 20d97356 Blue Swirl
        .name = BLOCK_OPT_CLUSTER_SIZE,
2046 20d97356 Blue Swirl
        .type = OPT_SIZE,
2047 99cce9fa Kevin Wolf
        .help = "qcow2 cluster size",
2048 99cce9fa Kevin Wolf
        .value = { .n = DEFAULT_CLUSTER_SIZE },
2049 20d97356 Blue Swirl
    },
2050 20d97356 Blue Swirl
    {
2051 20d97356 Blue Swirl
        .name = BLOCK_OPT_PREALLOC,
2052 20d97356 Blue Swirl
        .type = OPT_STRING,
2053 20d97356 Blue Swirl
        .help = "Preallocation mode (allowed values: off, metadata)"
2054 20d97356 Blue Swirl
    },
2055 bfe8043e Stefan Hajnoczi
    {
2056 bfe8043e Stefan Hajnoczi
        .name = BLOCK_OPT_LAZY_REFCOUNTS,
2057 bfe8043e Stefan Hajnoczi
        .type = OPT_FLAG,
2058 bfe8043e Stefan Hajnoczi
        .help = "Postpone refcount updates",
2059 bfe8043e Stefan Hajnoczi
    },
2060 20d97356 Blue Swirl
    { NULL }
2061 20d97356 Blue Swirl
};
2062 20d97356 Blue Swirl
2063 20d97356 Blue Swirl
static BlockDriver bdrv_qcow2 = {
2064 7c80ab3f Jes Sorensen
    .format_name        = "qcow2",
2065 7c80ab3f Jes Sorensen
    .instance_size      = sizeof(BDRVQcowState),
2066 7c80ab3f Jes Sorensen
    .bdrv_probe         = qcow2_probe,
2067 7c80ab3f Jes Sorensen
    .bdrv_open          = qcow2_open,
2068 7c80ab3f Jes Sorensen
    .bdrv_close         = qcow2_close,
2069 21d82ac9 Jeff Cody
    .bdrv_reopen_prepare  = qcow2_reopen_prepare,
2070 7c80ab3f Jes Sorensen
    .bdrv_create        = qcow2_create,
2071 3ac21627 Peter Lieven
    .bdrv_has_zero_init = bdrv_has_zero_init_1,
2072 b6b8a333 Paolo Bonzini
    .bdrv_co_get_block_status = qcow2_co_get_block_status,
2073 7c80ab3f Jes Sorensen
    .bdrv_set_key       = qcow2_set_key,
2074 7c80ab3f Jes Sorensen
    .bdrv_make_empty    = qcow2_make_empty,
2075 7c80ab3f Jes Sorensen
2076 c68b89ac Kevin Wolf
    .bdrv_co_readv          = qcow2_co_readv,
2077 c68b89ac Kevin Wolf
    .bdrv_co_writev         = qcow2_co_writev,
2078 eb489bb1 Kevin Wolf
    .bdrv_co_flush_to_os    = qcow2_co_flush_to_os,
2079 419b19d9 Stefan Hajnoczi
2080 621f0589 Kevin Wolf
    .bdrv_co_write_zeroes   = qcow2_co_write_zeroes,
2081 6db39ae2 Paolo Bonzini
    .bdrv_co_discard        = qcow2_co_discard,
2082 419b19d9 Stefan Hajnoczi
    .bdrv_truncate          = qcow2_truncate,
2083 7c80ab3f Jes Sorensen
    .bdrv_write_compressed  = qcow2_write_compressed,
2084 20d97356 Blue Swirl
2085 20d97356 Blue Swirl
    .bdrv_snapshot_create   = qcow2_snapshot_create,
2086 20d97356 Blue Swirl
    .bdrv_snapshot_goto     = qcow2_snapshot_goto,
2087 20d97356 Blue Swirl
    .bdrv_snapshot_delete   = qcow2_snapshot_delete,
2088 20d97356 Blue Swirl
    .bdrv_snapshot_list     = qcow2_snapshot_list,
2089 51ef6727 edison
    .bdrv_snapshot_load_tmp     = qcow2_snapshot_load_tmp,
2090 7c80ab3f Jes Sorensen
    .bdrv_get_info      = qcow2_get_info,
2091 20d97356 Blue Swirl
2092 7c80ab3f Jes Sorensen
    .bdrv_save_vmstate    = qcow2_save_vmstate,
2093 7c80ab3f Jes Sorensen
    .bdrv_load_vmstate    = qcow2_load_vmstate,
2094 20d97356 Blue Swirl
2095 20d97356 Blue Swirl
    .bdrv_change_backing_file   = qcow2_change_backing_file,
2096 20d97356 Blue Swirl
2097 06d9260f Anthony Liguori
    .bdrv_invalidate_cache      = qcow2_invalidate_cache,
2098 06d9260f Anthony Liguori
2099 7c80ab3f Jes Sorensen
    .create_options = qcow2_create_options,
2100 7c80ab3f Jes Sorensen
    .bdrv_check = qcow2_check,
2101 9296b3ed Max Reitz
    .bdrv_amend_options = qcow2_amend_options,
2102 20d97356 Blue Swirl
};
2103 20d97356 Blue Swirl
2104 5efa9d5a Anthony Liguori
static void bdrv_qcow2_init(void)
2105 5efa9d5a Anthony Liguori
{
2106 5efa9d5a Anthony Liguori
    bdrv_register(&bdrv_qcow2);
2107 5efa9d5a Anthony Liguori
}
2108 5efa9d5a Anthony Liguori
2109 5efa9d5a Anthony Liguori
block_init(bdrv_qcow2_init);