block/qcow2-cluster.c @ 1d3afd64

/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <zlib.h>

#include "qemu-common.h"
#include "block_int.h"
#include "block/qcow2.h"
#include "trace.h"

int qcow2_grow_l1_table(BlockDriverState *bs, int min_size, bool exact_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_l1_size, new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t new_l1_table_offset;
    uint8_t data[12];

    if (min_size <= s->l1_size)
        return 0;

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
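        /* Grow geometrically by roughly 1.5x per step (1, 2, 3, 5, 8, ...),
         * so reaching any min_size takes only O(log(min_size)) regrowths. */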
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = g_malloc0(align_offset(new_l1_size2, 512));
    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        g_free(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset, new_l1_table, new_l1_size2);
    if (ret < 0)
        goto fail;
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
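    /* The 12-byte buffer matches the two consecutive QCowHeader fields: a
     * big-endian 32-bit l1_size followed by a big-endian 64-bit
     * l1_table_offset, so both fields are updated with a single write. */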
    cpu_to_be32w((uint32_t*)data, new_l1_size);
    cpu_to_be64wu((uint64_t*)(data + 4), new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size), data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    g_free(s->l1_table);
    qcow2_free_clusters(bs, s->l1_table_offset, s->l1_size * sizeof(uint64_t));
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    s->l1_size = new_l1_size;
    return 0;
 fail:
    g_free(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2);
    return ret;
}

/*
 * l2_load
 *
 * Loads an L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success, -errno if the read from the image file failed.
 */

static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
    uint64_t **l2_table)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, (void**) l2_table);

    return ret;
}

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pwrite to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
static int write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR];
    int l1_start_index;
    int i, ret;

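    /* Round down to the 64-entry (512-byte) sector that contains l1_index;
     * the whole sector is byteswapped and rewritten below. */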
    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR; i++) {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset + 8 * l1_start_index,
        buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L1 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 *
 */

static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        return l2_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset, (void**) table);
    if (ret < 0) {
        return ret;
    }

    l2_table = *table;

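    /* L1E_OFFSET_MASK strips the flag bits (such as QCOW_OFLAG_COPIED) from
     * the L1 entry so that only the actual L2 table offset is tested. */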
    if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t* old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache,
            old_l2_offset & L1E_OFFSET_MASK,
            (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    trace_qcow2_l2_allocate_write_l2(bs, l1_index);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
    s->l1_table[l1_index] = old_l2_offset;
    return ret;
}

/*
 * Checks how many clusters in a given L2 table are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster, which may require different handling.)
 */
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t start, uint64_t stop_flags)
{
    int i;
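    /* The mask covers both the offset bits and the caller's stop flags, so a
     * change in either ends the contiguous run counted below. */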
    uint64_t mask = stop_flags | L2E_OFFSET_MASK;
    uint64_t offset = be64_to_cpu(l2_table[0]) & mask;

    if (!offset)
        return 0;

    for (i = start; i < start + nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return (i - start);
}

static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        int type = qcow2_get_cluster_type(be64_to_cpu(l2_table[i]));

        if (type != QCOW2_CLUSTER_UNALLOCATED) {
            break;
        }
    }

    return i;
}

/* The crypt function is compatible with the Linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                           uint8_t *out_buf, const uint8_t *in_buf,
                           int nb_sectors, int enc,
                           const AES_KEY *key)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

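    /* Each 512-byte sector is encrypted independently in CBC mode; the IV is
     * the little-endian sector number, matching the cryptoloop scheme. */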
    for(i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                        ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}

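/*
 * copy_sectors
 *
 * Copy-on-write helper: reads guest sectors [start_sect + n_start,
 * start_sect + n_end) through the format layer (so backing file data is
 * included), encrypts them if necessary, and writes them into the newly
 * allocated host cluster at cluster_offset.
 */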
static int coroutine_fn copy_sectors(BlockDriverState *bs,
                                     uint64_t start_sect,
                                     uint64_t cluster_offset,
                                     int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;
    int n, ret;

    /*
     * If this is the last cluster and it is only partially used, we must only
     * copy until the end of the image, or bdrv_check_request will fail for the
     * bdrv_read/write calls below.
     */
    if (start_sect + n_end > bs->total_sectors) {
        n_end = bs->total_sectors - start_sect;
    }

    n = n_end - n_start;
    if (n <= 0) {
        return 0;
    }

    iov.iov_len = n * BDRV_SECTOR_SIZE;
    iov.iov_base = qemu_blockalign(bs, iov.iov_len);

    qemu_iovec_init_external(&qiov, &iov, 1);

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface.  This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_readv(bs, start_sect + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    if (s->crypt_method) {
        qcow2_encrypt_sectors(s, start_sect + n_start,
                        iov.iov_base, iov.iov_base, n, 1,
                        &s->aes_encrypt_key);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_writev(bs->file, (cluster_offset >> 9) + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}


/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, find the cluster offset in the
 * qcow2 file. The offset is stored in *cluster_offset.
 *
 * on entry, *num is the number of contiguous sectors we'd like to
 * access following offset.
 *
 * on exit, *num is the number of contiguous sectors we can read.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num, uint64_t *cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l1_index, l2_index;
    uint64_t l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int index_in_cluster, nb_clusters;
    uint64_t nb_available, nb_needed;
    int ret;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

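    /* One L1 entry maps a whole L2 table, i.e. 2^(l2_bits + cluster_bits)
     * bytes of the virtual disk, so l1_bits selects the L1 slot below. */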
    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }

    *cluster_offset = 0;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    /* load the l2 table in memory */

    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    ret = qcow2_get_cluster_type(*cluster_offset);
    switch (ret) {
    case QCOW2_CLUSTER_COMPRESSED:
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO:
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0,
                QCOW_OFLAG_COMPRESSED | QCOW_OFLAG_ZERO);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters ? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0,
                QCOW_OFLAG_COMPRESSED | QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        break;
    default:
        abort();
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);

    nb_available = (c * s->cluster_sectors);

out:
    if (nb_available > nb_needed)
        nb_available = nb_needed;

    *num = nb_available - index_in_cluster;

    return ret;
}

/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * the l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l1_index, l2_index;
    uint64_t l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;

    /* seek the l2 table of the given l2 offset */

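    /* QCOW_OFLAG_COPIED in the L1 entry means the L2 table has a refcount of
     * exactly one (no snapshot shares it), so it can be updated in place. */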
    if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
        }
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return the cluster offset in the
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Return the cluster offset if successful;
 * return 0 otherwise.
 *
 */

uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return 0;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

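    /* Build the compressed cluster descriptor: the low bits keep the host
     * offset, and the number of additional 512-byte sectors occupied by the
     * compressed data is stored in the bits above csize_shift. */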
    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return 0;
    }

    return cluster_offset;
}

int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, start_sect, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;
    bool cow = false;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);

    if (m->nb_clusters == 0)
        return 0;

    old_cluster = g_malloc(m->nb_clusters * sizeof(uint64_t));

    /* copy content of unmodified sectors */
    start_sect = m->offset >> 9;
    if (m->n_start) {
        cow = true;
        qemu_co_mutex_unlock(&s->lock);
        ret = copy_sectors(bs, start_sect, cluster_offset, 0, m->n_start);
        qemu_co_mutex_lock(&s->lock);
        if (ret < 0)
            goto err;
    }

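    /* If the request ends in the middle of a cluster, the tail of the last
     * cluster must likewise be filled from the old contents. */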
    if (m->nb_available & (s->cluster_sectors - 1)) {
        cow = true;
        qemu_co_mutex_unlock(&s->lock);
        ret = copy_sectors(bs, start_sect, cluster_offset, m->nb_available,
                           align_offset(m->nb_available, s->cluster_sectors));
        qemu_co_mutex_lock(&s->lock);
        if (ret < 0)
            goto err;
    }

    /*
     * Update L2 table.
     *
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    if (cow) {
        qcow2_cache_depends_on_flush(s->l2_table_cache);
    }

    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }
    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);

    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes its data
         * concurrently. The first one to complete updates the L2 table with
         * a pointer to its cluster; the second one has to do RMW (done above
         * by copy_sectors()), update the L2 table with its cluster pointer,
         * and free the old cluster. This is what this loop does. */
        if (l2_table[l2_index + i] != 0)
            old_cluster[j++] = l2_table[l2_index + i];

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        goto err;
    }

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     * Also flush bs->file to get the right order for L2 and refcount update.
     */
    if (j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes not yet allocated
 * space, which must be copied from the backing file)
 */
static int count_cow_clusters(BDRVQcowState *s, int nb_clusters,
    uint64_t *l2_table, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
        int cluster_type = qcow2_get_cluster_type(l2_entry);

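        /* A normal cluster with QCOW_OFLAG_COPIED can be overwritten in
         * place without COW, so it ends the run counted here. */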
        switch(cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}

/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, unsigned int *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    QCowL2Meta *old_alloc;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    /*
     * Check if there already is an AIO write request in flight which allocates
     * the same cluster. In this case we need to wait until the previous
     * request has completed and updated the L2 table accordingly.
     */
    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset >> s->cluster_bits;
        uint64_t end = start + *nb_clusters;
        uint64_t old_start = old_alloc->offset >> s->cluster_bits;
        uint64_t old_end = old_start + old_alloc->nb_clusters;

        if (end < old_start || start > old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                *nb_clusters = old_start - start;
            } else {
                *nb_clusters = 0;
            }

            if (*nb_clusters == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_mutex_unlock(&s->lock);
                qemu_co_queue_wait(&old_alloc->dependent_requests);
                qemu_co_mutex_lock(&s->lock);
                return -EAGAIN;
            }
        }
    }

    if (!*nb_clusters) {
        abort();
    }

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
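        /* Allocation at a fixed offset may succeed only partially; the
         * returned count of contiguous clusters replaces *nb_clusters. */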
        int ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}

/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in the
 * qcow2 file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int n_start, int n_end, int *num, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret, sectors;
    uint64_t *l2_table;
    unsigned int nb_clusters, keep_clusters;
    uint64_t cluster_offset;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset,
                                      n_start, n_end);

    /* Find L2 entry for the first involved cluster */
again:
    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters = MIN(size_to_clusters(s, n_end << BDRV_SECTOR_BITS),
                      s->l2_size - l2_index);

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /*
     * Check how many clusters are already allocated and don't need COW, and how
     * many need a new allocation.
     */
    if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index], 0,
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);
        nb_clusters -= keep_clusters;
    } else {
        keep_clusters = 0;
        cluster_offset = 0;
    }

    if (nb_clusters > 0) {
        /* For the moment, overwrite compressed clusters one by one */
        uint64_t entry = be64_to_cpu(l2_table[l2_index + keep_clusters]);
        if (entry & QCOW_OFLAG_COMPRESSED) {
            nb_clusters = 1;
        } else {
            nb_clusters = count_cow_clusters(s, nb_clusters, l2_table,
                                             l2_index + keep_clusters);
        }
    }

    cluster_offset &= L2E_OFFSET_MASK;

    /*
     * The L2 table isn't used any more after this. As long as the cache works
     * synchronously, it's important to release it before calling
     * do_alloc_cluster_offset, which may yield if we need to wait for another
     * request to complete. If we still had the reference, we could use up the
     * whole cache with sleeping requests.
     */
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* If there is something left to allocate, do that now */
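    /* Start with an empty QCowL2Meta so callers see nb_clusters == 0 (nothing
     * newly allocated) unless the allocation path below fills in the rest. */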
    *m = (QCowL2Meta) {
        .cluster_offset     = cluster_offset,
        .nb_clusters        = 0,
    };
    qemu_co_queue_init(&m->dependent_requests);

    if (nb_clusters > 0) {
        uint64_t alloc_offset;
        uint64_t alloc_cluster_offset;
        uint64_t keep_bytes = keep_clusters * s->cluster_size;

        /* Calculate start and size of allocation */
        alloc_offset = offset + keep_bytes;

        if (keep_clusters == 0) {
            alloc_cluster_offset = 0;
        } else {
            alloc_cluster_offset = cluster_offset + keep_bytes;
        }

        /* Allocate, if necessary at a given offset in the image file */
        ret = do_alloc_cluster_offset(bs, alloc_offset, &alloc_cluster_offset,
                                      &nb_clusters);
        if (ret == -EAGAIN) {
            goto again;
        } else if (ret < 0) {
            goto fail;
        }

        /* save info needed for the metadata update */
        if (nb_clusters > 0) {
            /*
             * requested_sectors: Number of sectors from the start of the first
             * newly allocated cluster to the end of the (possibly already
             * shortened) write request.
             *
             * avail_sectors: Number of sectors from the start of the first
             * newly allocated to the end of the last newly allocated cluster.
             */
            int requested_sectors = n_end - keep_clusters * s->cluster_sectors;
            int avail_sectors = nb_clusters
                                << (s->cluster_bits - BDRV_SECTOR_BITS);

            *m = (QCowL2Meta) {
                .cluster_offset = keep_clusters == 0 ?
                                  alloc_cluster_offset : cluster_offset,
                .alloc_offset   = alloc_cluster_offset,
                .offset         = alloc_offset & ~(s->cluster_size - 1),
                .n_start        = keep_clusters == 0 ? n_start : 0,
                .nb_clusters    = nb_clusters,
                .nb_available   = MIN(requested_sectors, avail_sectors),
            };
            qemu_co_queue_init(&m->dependent_requests);
            QLIST_INSERT_HEAD(&s->cluster_allocs, m, next_in_flight);
        }
    }

    /* Compute how many sectors this request can cover and return it in *num */
    sectors = (keep_clusters + nb_clusters) << (s->cluster_bits - 9);
    if (sectors > n_end) {
        sectors = n_end;
    }

    assert(sectors > n_start);
    *num = sectors - n_start;

    return 0;

fail:
    if (m->nb_clusters > 0) {
        QLIST_REMOVE(m, next_in_flight);
    }
    return ret;
}

static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

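    /* Negative windowBits selects a raw deflate stream without a zlib
     * header; qcow2 compresses clusters with a 2^12 (4 KB) window. */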
    ret = inflateInit2(strm, -12);
    if (ret != Z_OK)
        return -1;
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}

int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
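        /* Decode the descriptor built in qcow2_alloc_compressed_cluster_offset:
         * the field above csize_shift counts additional sectors, hence +1. */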
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}

/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of discarded
 * clusters.
 */
static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);
        if ((old_offset & L2E_OFFSET_MASK) == 0) {
            continue;
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        l2_table[l2_index + i] = cpu_to_be64(0);

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_offset, 1);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}

int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset,
    int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t end_offset;
    unsigned int nb_clusters;
    int ret;

    end_offset = offset + (nb_sectors << BDRV_SECTOR_BITS);

    /* Round start up and end down */
    offset = align_offset(offset, s->cluster_size);
    end_offset &= ~(s->cluster_size - 1);
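    /* Clusters that the request covers only partially keep their data, so
     * the range is shrunk inward to whole clusters. */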

    if (offset > end_offset) {
        return 0;
    }

    nb_clusters = size_to_clusters(s, end_offset - offset);

    /* Each L2 table is handled by its own loop iteration */
    while (nb_clusters > 0) {
        ret = discard_single_l2(bs, offset, nb_clusters);
        if (ret < 0) {
            return ret;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    return 0;
}

/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of zeroed
 * clusters.
 */
static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);

        /* Update L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
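        /* A compressed cluster descriptor has no room for the zero flag
         * (bit 0 belongs to the compressed data offset), so the cluster is
         * freed and the entry replaced by a plain zero entry instead. */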
        if (old_offset & QCOW_OFLAG_COMPRESSED) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1);
        } else {
            l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
        }
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}

int qcow2_zero_clusters(BlockDriverState *bs, uint64_t offset, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int nb_clusters;
    int ret;

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 table is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, nb_sectors << BDRV_SECTOR_BITS);

    while (nb_clusters > 0) {
        ret = zero_single_l2(bs, offset, nb_clusters);
        if (ret < 0) {
            return ret;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    return 0;
}