block/qcow2-cluster.c @ 80fa3341

/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <zlib.h>

#include "qemu-common.h"
#include "block_int.h"
#include "block/qcow2.h"

int qcow2_grow_l1_table(BlockDriverState *bs, int min_size, bool exact_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_l1_size, new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t new_l1_table_offset;
    uint8_t data[12];

    if (min_size <= s->l1_size)
        return 0;

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }

#ifdef DEBUG_ALLOC2
    printf("grow l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_mallocz(align_offset(new_l1_size2, 512));
    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_free(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for (i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0)
        goto fail;
    for (i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    cpu_to_be32w((uint32_t*)data, new_l1_size);
    cpu_to_be64wu((uint64_t*)(data + 4), new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    qemu_free(s->l1_table);
    qcow2_free_clusters(bs, s->l1_table_offset, s->l1_size * sizeof(uint64_t));
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    s->l1_size = new_l1_size;
    return 0;
 fail:
    qemu_free(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2);
    return ret;
}

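/*
 * Illustrative sketch (not compiled, not part of the driver): the 1.5x
 * growth rule above yields the progression 1, 2, 3, 5, 8, 12, 18, 27, ...
 * so repeated small grows stay cheap. The helper name is hypothetical.
 */
#if 0
static int example_grown_l1_size(int current_size, int min_size)
{
    int size = current_size ? current_size : 1;
    while (min_size > size) {
        size = (size * 3 + 1) / 2;
    }
    return size;
}
#endif
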
/*
 * l2_load
 *
 * Loads a L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success, or a negative errno value if the read from the
 * image file failed. On success, *l2_table points to the loaded table.
 */

static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
    uint64_t **l2_table)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, (void**) l2_table);

    return ret;
}

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want to do a read-modify-write with bdrv_pread)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
static int write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR];
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR; i++) {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset + 8 * l1_start_index,
        buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

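/*
 * Illustrative sketch (not compiled): with 64 entries per 512-byte sector,
 * masking with ~(L1_ENTRIES_PER_SECTOR - 1) rounds an entry index down to
 * the first entry of its sector, so the whole sector can be rewritten.
 */
#if 0
    int l1_index = 70;
    int l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    /* l1_start_index == 64; the write covers entries 64..127 */
#endif
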
/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L1 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 */

static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        return l2_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        /* No L2 cache entry is held yet, so taking the fail path would put
         * an entry that was never acquired; just release the clusters. */
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
        return ret;
    }

    /* allocate a new entry in the l2 cache */

    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset, (void**) table);
    if (ret < 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
        return ret;
    }

    l2_table = *table;

    if (old_l2_offset == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t* old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache, old_l2_offset,
            (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    return 0;

fail:
    qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
    s->l1_table[l1_index] = old_l2_offset;
    return ret;
}

static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t start, uint64_t mask)
{
    int i;
    uint64_t offset = be64_to_cpu(l2_table[0]) & ~mask;

    if (!offset)
        return 0;

    for (i = start; i < start + nb_clusters; i++)
        if (offset + (uint64_t) i * cluster_size != (be64_to_cpu(l2_table[i]) & ~mask))
            break;

    return (i - start);
}

static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
{
    int i = 0;

    while (nb_clusters-- && l2_table[i] == 0)
        i++;

    return i;
}

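/*
 * Illustrative sketch (not compiled): how the two helpers above count runs
 * in a toy L2 array. Values are made up.
 */
#if 0
    uint64_t l2[4];
    l2[0] = cpu_to_be64(0x10000);   /* cluster 0 at 0x10000 */
    l2[1] = cpu_to_be64(0x20000);   /* contiguous with l2[0] */
    l2[2] = cpu_to_be64(0x90000);   /* breaks the run */
    l2[3] = 0;                      /* unallocated */

    /* returns 2: 0x10000 + 2 * 0x10000 != 0x90000 ends the run */
    int n = count_contiguous_clusters(4, 0x10000, l2, 0, 0);

    /* returns 1: one zero entry starting at index 3 */
    int nf = count_contiguous_free_clusters(1, &l2[3]);
#endif
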
/* The crypt function is compatible with the Linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                           uint8_t *out_buf, const uint8_t *in_buf,
                           int nb_sectors, int enc,
                           const AES_KEY *key)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

    for (i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                        ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}

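/*
 * Illustrative sketch (not compiled): the CBC IV used above is just the
 * 64-bit sector number in little-endian byte order, zero-padded to the
 * 16-byte AES block size. The helper name is hypothetical.
 */
#if 0
static void example_sector_iv(int64_t sector_num, uint8_t iv[16])
{
    int i;
    memset(iv, 0, 16);
    for (i = 0; i < 8; i++) {
        iv[i] = (uint8_t)(sector_num >> (8 * i));  /* little-endian */
    }
}
#endif
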
static int qcow2_read(BlockDriverState *bs, int64_t sector_num,
                      uint8_t *buf, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    int ret, index_in_cluster, n, n1;
    uint64_t cluster_offset;
    struct iovec iov;
    QEMUIOVector qiov;

    while (nb_sectors > 0) {
        n = nb_sectors;

        ret = qcow2_get_cluster_offset(bs, sector_num << 9, &n,
            &cluster_offset);
        if (ret < 0) {
            return ret;
        }

        index_in_cluster = sector_num & (s->cluster_sectors - 1);
        if (!cluster_offset) {
            if (bs->backing_hd) {
                /* read from the base image */
                iov.iov_base = buf;
                iov.iov_len = n * 512;
                qemu_iovec_init_external(&qiov, &iov, 1);

                n1 = qcow2_backing_read1(bs->backing_hd, &qiov, sector_num, n);
                if (n1 > 0) {
                    BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING);
                    ret = bdrv_read(bs->backing_hd, sector_num, buf, n1);
                    if (ret < 0)
                        return ret;
                }
            } else {
                memset(buf, 0, 512 * n);
            }
        } else if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
            ret = qcow2_decompress_cluster(bs, cluster_offset);
            if (ret < 0)
                return ret;
            memcpy(buf, s->cluster_cache + index_in_cluster * 512, 512 * n);
        } else {
            BLKDBG_EVENT(bs->file, BLKDBG_READ);
            ret = bdrv_pread(bs->file, cluster_offset + index_in_cluster * 512,
                             buf, n * 512);
            if (ret != n * 512)
                return -1;
            if (s->crypt_method) {
                qcow2_encrypt_sectors(s, sector_num, buf, buf, n, 0,
                                &s->aes_decrypt_key);
            }
        }
        nb_sectors -= n;
        sector_num += n;
        buf += n * 512;
    }
    return 0;
}

static int copy_sectors(BlockDriverState *bs, uint64_t start_sect,
                        uint64_t cluster_offset, int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    int n, ret;

    n = n_end - n_start;
    if (n <= 0)
        return 0;
    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);
    ret = qcow2_read(bs, start_sect + n_start, s->cluster_data, n);
    if (ret < 0)
        return ret;
    if (s->crypt_method) {
        qcow2_encrypt_sectors(s, start_sect + n_start,
                        s->cluster_data,
                        s->cluster_data, n, 1,
                        &s->aes_encrypt_key);
    }
    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_write(bs->file, (cluster_offset >> 9) + n_start,
        s->cluster_data, n);
    if (ret < 0)
        return ret;
    return 0;
}

/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, find the cluster offset in
 * qcow2 file. The offset is stored in *cluster_offset.
 *
 * on entry, *num is the number of contiguous clusters we'd like to
 * access following offset.
 *
 * on exit, *num is the number of contiguous clusters we can read.
 *
 * Return 0 if the offset is found, -errno otherwise.
 */

int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num, uint64_t *cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l1_index, l2_index;
    uint64_t l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int index_in_cluster, nb_clusters;
    uint64_t nb_available, nb_needed;
    int ret;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }

    *cluster_offset = 0;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size)
        goto out;

    l2_offset = s->l1_table[l1_index];

    /* seek the l2 table of the given l2 offset */

    if (!l2_offset)
        goto out;

    /* load the l2 table in memory */

    l2_offset &= ~QCOW_OFLAG_COPIED;
    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    if (!*cluster_offset) {
        /* how many empty clusters ? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
    } else {
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0, QCOW_OFLAG_COPIED);
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);

    nb_available = (c * s->cluster_sectors);
out:
    if (nb_available > nb_needed)
        nb_available = nb_needed;

    *num = nb_available - index_in_cluster;

    *cluster_offset &= ~QCOW_OFLAG_COPIED;
    return 0;
}

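/*
 * Illustrative sketch (not compiled): how a guest offset is split into the
 * indices used above, assuming 64 KiB clusters (cluster_bits = 16, 128
 * sectors per cluster) and l2_bits = 13 (8192 entries per L2 table). The
 * numbers are made up.
 */
#if 0
    uint64_t offset = 0x123456789ULL;
    int l1_bits = 13 + 16;                              /* l2_bits + cluster_bits */
    unsigned int l1_index = offset >> l1_bits;          /* == 9: which L2 table */
    unsigned int l2_index = (offset >> 16) & (8192 - 1);/* == 837: entry within it */
    unsigned int index_in_cluster =
        (offset >> 9) & (128 - 1);                      /* == 51: sector in cluster */
#endif
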
/*
 * get_cluster_table
 *
 * For a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * The l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             uint64_t *new_l2_offset,
                             int *new_l2_index)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l1_index, l2_index;
    uint64_t l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }
    l2_offset = s->l1_table[l1_index];

    /* seek the l2 table of the given l2 offset */

    if (l2_offset & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        l2_offset &= ~QCOW_OFLAG_COPIED;
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
        }
        l2_offset = s->l1_table[l1_index] & ~QCOW_OFLAG_COPIED;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_offset = l2_offset;
    *new_l2_index = l2_index;

    return 0;
}

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Return the cluster offset if successful; return 0 otherwise.
 */

uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t l2_offset, *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
    if (ret < 0) {
        return 0;
    }

    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & QCOW_OFLAG_COPIED) {
        /* drop the cache reference taken by get_cluster_table() */
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return cluster_offset & ~QCOW_OFLAG_COPIED;
    }

    if (cluster_offset)
        qcow2_free_any_clusters(bs, cluster_offset, 1);

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return 0;
    }

    return cluster_offset;
}

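/*
 * Illustrative sketch (not compiled): nb_csectors is the index distance
 * between the first and last 512-byte sector the compressed data touches,
 * which depends on where inside a sector the allocation starts. Values are
 * made up.
 */
#if 0
    int64_t cluster_offset = 0x10100;   /* starts 256 bytes into sector 128 */
    int compressed_size = 1000;
    int nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                      (cluster_offset >> 9);
    /* == 130 - 128 == 2; qcow2_decompress_cluster() adds 1 back, so the
     * reader fetches 3 sectors (128..130) to cover the data */
#endif
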
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, start_sect, l2_offset, *l2_table;
    uint64_t cluster_offset = m->cluster_offset;
    bool cow = false;

    if (m->nb_clusters == 0)
        return 0;

    old_cluster = qemu_malloc(m->nb_clusters * sizeof(uint64_t));

    /* copy content of unmodified sectors */
    start_sect = (m->offset & ~(s->cluster_size - 1)) >> 9;
    if (m->n_start) {
        cow = true;
        ret = copy_sectors(bs, start_sect, cluster_offset, 0, m->n_start);
        if (ret < 0)
            goto err;
    }

    if (m->nb_available & (s->cluster_sectors - 1)) {
        uint64_t end = m->nb_available & ~(uint64_t)(s->cluster_sectors - 1);
        cow = true;
        ret = copy_sectors(bs, start_sect + end, cluster_offset + (end << 9),
                m->nb_available - end, s->cluster_sectors);
        if (ret < 0)
            goto err;
    }

    /*
     * Update L2 table.
     *
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    if (cow) {
        qcow2_cache_depends_on_flush(s->l2_table_cache);
    }

    qcow2_cache_set_dependency(bs, s->l2_table_cache, s->refcount_block_cache);
    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_offset, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);

    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes hit the same unallocated cluster, each
         * write allocates a separate cluster and writes the data
         * concurrently. The first one to complete updates the L2 table with
         * a pointer to its cluster; the second one has to do RMW (which is
         * done above by copy_sectors()), update the L2 table with its
         * cluster pointer, and free the old cluster. This is what this loop
         * does. */
        if (l2_table[l2_index + i] != 0)
            old_cluster[j++] = l2_table[l2_index + i];

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        goto err;
    }

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     * Also flush bs->file to get the right order for L2 and refcount update.
     */
    if (j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs,
                be64_to_cpu(old_cluster[i]) & ~QCOW_OFLAG_COPIED, 1);
        }
    }

    ret = 0;
err:
    qemu_free(old_cluster);
    return ret;
}

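/*
 * Illustrative sketch (not compiled): for a write covering sectors 10..19
 * of a freshly allocated 128-sector cluster, the two copy_sectors() calls
 * above copy the untouched head and tail around the guest data. Numbers are
 * made up.
 */
#if 0
    /* assume m->n_start == 10, m->nb_available == 20, cluster_sectors == 128;
     * end == 20 & ~127 == 0, so the tail call starts at the cluster base */
    copy_sectors(bs, start_sect, cluster_offset, 0, 10);   /* head: sectors 0..9 */
    copy_sectors(bs, start_sect, cluster_offset, 20, 128); /* tail: sectors 20..127 */
#endif
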
/*
 * alloc_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in qcow2 file.
 * If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0,
 * m->depends_on is set to NULL and the other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. This may be 0 if the request
 * conflicts with another write request in flight; in this case, m->depends_on
 * is set and the remaining fields of m are meaningless.
 *
 * If m->nb_clusters is non-zero, the other fields of m are valid and contain
 * information about the first allocated cluster.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int n_start, int n_end, int *num, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t l2_offset, *l2_table;
    int64_t cluster_offset;
    unsigned int nb_clusters, i = 0;
    QCowL2Meta *old_alloc;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
    if (ret < 0) {
        return ret;
    }

    nb_clusters = size_to_clusters(s, n_end << 9);

    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* We keep all QCOW_OFLAG_COPIED clusters */

    if (cluster_offset & QCOW_OFLAG_COPIED) {
        nb_clusters = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0, 0);

        cluster_offset &= ~QCOW_OFLAG_COPIED;
        m->nb_clusters = 0;
        m->depends_on = NULL;

        goto out;
    }

    /* for the moment, multiple compressed clusters are not managed */

    if (cluster_offset & QCOW_OFLAG_COMPRESSED)
        nb_clusters = 1;

    /* how many available clusters ? */

    while (i < nb_clusters) {
        i += count_contiguous_clusters(nb_clusters - i, s->cluster_size,
                &l2_table[l2_index], i, 0);
        if ((i >= nb_clusters) || be64_to_cpu(l2_table[l2_index + i])) {
            break;
        }

        i += count_contiguous_free_clusters(nb_clusters - i,
                &l2_table[l2_index + i]);
        if (i >= nb_clusters) {
            break;
        }

        cluster_offset = be64_to_cpu(l2_table[l2_index + i]);

        if ((cluster_offset & QCOW_OFLAG_COPIED) ||
                (cluster_offset & QCOW_OFLAG_COMPRESSED))
            break;
    }
    assert(i <= nb_clusters);
    nb_clusters = i;

    /*
     * Check if there already is an AIO write request in flight which allocates
     * the same cluster. In this case we need to wait until the previous
     * request has completed and updated the L2 table accordingly.
     */
    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t end_offset = offset + nb_clusters * s->cluster_size;
        uint64_t old_offset = old_alloc->offset;
        uint64_t old_end_offset = old_alloc->offset +
            old_alloc->nb_clusters * s->cluster_size;

        if (end_offset < old_offset || offset > old_end_offset) {
            /* No intersection */
        } else {
            if (offset < old_offset) {
                /* Stop at the start of a running allocation */
                nb_clusters = (old_offset - offset) >> s->cluster_bits;
            } else {
                nb_clusters = 0;
            }

            if (nb_clusters == 0) {
                /* Set dependency and wait for a callback */
                m->depends_on = old_alloc;
                m->nb_clusters = 0;
                *num = 0;
                ret = 0;
                goto fail;
            }
        }
    }

    if (!nb_clusters) {
        abort();
    }

    QLIST_INSERT_HEAD(&s->cluster_allocs, m, next_in_flight);

    /* allocate a new cluster */

    cluster_offset = qcow2_alloc_clusters(bs, nb_clusters * s->cluster_size);
    if (cluster_offset < 0) {
        QLIST_REMOVE(m, next_in_flight);
        ret = cluster_offset;
        goto fail;
    }

    /* save info needed for meta data update */
    m->offset = offset;
    m->n_start = n_start;
    m->nb_clusters = nb_clusters;

out:
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    m->nb_available = MIN(nb_clusters << (s->cluster_bits - 9), n_end);
    m->cluster_offset = cluster_offset;

    *num = m->nb_available - n_start;

    return 0;

fail:
    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    return ret;
}

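/*
 * Illustrative sketch (not compiled): how the in-flight overlap check above
 * truncates or defers an allocation, assuming 64 KiB clusters. Values are
 * made up.
 */
#if 0
    uint64_t old_offset = 0x40000, old_end_offset = 0x80000;  /* in flight */
    uint64_t offset = 0x20000;
    unsigned int nb_clusters = 4;              /* would reach 0x60000 */
    uint64_t end_offset = offset + nb_clusters * 0x10000;
    if (!(end_offset < old_offset || offset > old_end_offset)) {
        /* truncated to the 2 clusters below the running allocation */
        nb_clusters = (old_offset - offset) >> 16;
    }
    /* a request starting inside [0x40000, 0x80000) would instead get
     * nb_clusters == 0 and wait on m->depends_on */
#endif
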
static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    ret = inflateInit2(strm, -12);
    if (ret != Z_OK)
        return -1;
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}

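/*
 * Note on inflateInit2(strm, -12) above: a negative windowBits value selects
 * raw deflate data (no zlib header or trailing checksum), here with a 4 KiB
 * window. A matching compressor would be set up along these lines (a sketch,
 * not part of this file):
 */
#if 0
    z_stream strm;
    memset(&strm, 0, sizeof(strm));
    /* raw deflate, windowBits == -12, readable by inflateInit2(strm, -12) */
    deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED, -12, 9,
                 Z_DEFAULT_STRATEGY);
#endif
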
int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}

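/*
 * Illustrative sketch (not compiled): decoding a compressed cluster
 * descriptor, continuing the allocation example after
 * qcow2_alloc_compressed_cluster_offset(). Values are made up.
 */
#if 0
    /* descriptor stores coffset == 0x10100 and a sector count field of 2 */
    uint64_t coffset = cluster_offset & s->cluster_offset_mask;    /* 0x10100 */
    int nb_csectors = ((cluster_offset >> s->csize_shift)
                       & s->csize_mask) + 1;                       /* 3 */
    int sector_offset = coffset & 511;                             /* 256 */
    int csize = nb_csectors * 512 - sector_offset;                 /* 1280 */
#endif
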
/*
 * This discards as many of the nb_clusters as possible at once (i.e. all
 * clusters in the same L2 table) and returns the number of discarded
 * clusters.
 */
static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t l2_offset, *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);
        old_offset &= ~QCOW_OFLAG_COPIED;

        if (old_offset == 0) {
            continue;
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        l2_table[l2_index + i] = cpu_to_be64(0);

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_offset, 1);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}

int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset,
    int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t end_offset;
    unsigned int nb_clusters;
    int ret;

    end_offset = offset + (nb_sectors << BDRV_SECTOR_BITS);

    /* Round start up and end down */
    offset = align_offset(offset, s->cluster_size);
    end_offset &= ~(s->cluster_size - 1);

    if (offset > end_offset) {
        return 0;
    }

    nb_clusters = size_to_clusters(s, end_offset - offset);

    /* Each L2 table is handled by its own loop iteration */
    while (nb_clusters > 0) {
        ret = discard_single_l2(bs, offset, nb_clusters);
        if (ret < 0) {
            return ret;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    return 0;
}

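/*
 * Illustrative sketch (not compiled): only whole clusters can be discarded,
 * so the start is rounded up and the end rounded down to cluster boundaries.
 * Assuming 64 KiB clusters, a request covering bytes 0x18000..0x38000 only
 * discards the single cluster at 0x20000. Values are made up.
 */
#if 0
    uint64_t offset = 0x18000, end_offset = 0x38000;
    offset = align_offset(offset, 0x10000);   /* rounds up to 0x20000 */
    end_offset &= ~(uint64_t)(0x10000 - 1);   /* rounds down to 0x30000 */
    /* one cluster, [0x20000, 0x30000), is discarded */
#endif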