block/qcow2-refcount.c @ db3a964f
/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu-common.h"
#include "block_int.h"
#include "block/qcow2.h"

static int64_t alloc_clusters_noref(BlockDriverState *bs, int64_t size);
static int update_refcount(BlockDriverState *bs,
                            int64_t offset, int64_t length,
                            int addend);


static int cache_refcount_updates = 0;

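/*
 * Writes the currently cached refcount block back to disk. Returns 0 on
 * success (or if no block is cached), -EIO on write error.
 */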
static int write_refcount_block(BDRVQcowState *s)
{
    size_t size = s->cluster_size;

    if (s->refcount_block_cache_offset == 0) {
        return 0;
    }

    if (bdrv_pwrite(s->hd, s->refcount_block_cache_offset,
            s->refcount_block_cache, size) != size)
    {
        return -EIO;
    }

    return 0;
}

/*********************************************************/
/* refcount handling */

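/*
 * Allocates the refcount block cache and loads the refcount table from the
 * image. Returns 0 on success, a negative errno on failure.
 */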
int qcow2_refcount_init(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    int ret, refcount_table_size2, i;

    s->refcount_block_cache = qemu_malloc(s->cluster_size);
    refcount_table_size2 = s->refcount_table_size * sizeof(uint64_t);
    s->refcount_table = qemu_malloc(refcount_table_size2);
    if (s->refcount_table_size > 0) {
        ret = bdrv_pread(s->hd, s->refcount_table_offset,
                         s->refcount_table, refcount_table_size2);
        if (ret != refcount_table_size2)
            goto fail;
        for(i = 0; i < s->refcount_table_size; i++)
            be64_to_cpus(&s->refcount_table[i]);
    }
    return 0;
 fail:
    return -ENOMEM;
}

void qcow2_refcount_close(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    qemu_free(s->refcount_block_cache);
    qemu_free(s->refcount_table);
}


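/*
 * Reads the refcount block at the given offset into the refcount block cache,
 * flushing the previously cached block first if updates are being cached.
 */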
static int load_refcount_block(BlockDriverState *bs,
                               int64_t refcount_block_offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    if (cache_refcount_updates) {
        write_refcount_block(s);
    }

    ret = bdrv_pread(s->hd, refcount_block_offset, s->refcount_block_cache,
                     s->cluster_size);
    if (ret != s->cluster_size)
        return -EIO;
    s->refcount_block_cache_offset = refcount_block_offset;
    return 0;
}

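/*
 * Returns the refcount of the cluster given by its index. Clusters that are
 * not covered by an allocated refcount block are reported as free.
 */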
static int get_refcount(BlockDriverState *bs, int64_t cluster_index)
{
    BDRVQcowState *s = bs->opaque;
    int refcount_table_index, block_index;
    int64_t refcount_block_offset;

    refcount_table_index = cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT);
    if (refcount_table_index >= s->refcount_table_size)
        return 0;
    refcount_block_offset = s->refcount_table[refcount_table_index];
    if (!refcount_block_offset)
        return 0;
    if (refcount_block_offset != s->refcount_block_cache_offset) {
        /* better than nothing: return allocated if read error */
        if (load_refcount_block(bs, refcount_block_offset) < 0)
            return 1;
    }
    block_index = cluster_index &
        ((1 << (s->cluster_bits - REFCOUNT_SHIFT)) - 1);
    return be16_to_cpu(s->refcount_block_cache[block_index]);
}

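/*
 * Grows the refcount table to at least min_size entries: allocates and writes
 * a larger copy, points the image header at it and frees the old table.
 * Returns 0 on success, a negative errno on error.
 */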
static int grow_refcount_table(BlockDriverState *bs, int min_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_table_size, new_table_size2, refcount_table_clusters, i, ret;
    uint64_t *new_table;
    int64_t table_offset;
    uint8_t data[12];
    int old_table_size;
    int64_t old_table_offset;

    if (min_size <= s->refcount_table_size)
        return 0;
    /* compute new table size */
    refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3);
    for(;;) {
        if (refcount_table_clusters == 0) {
            refcount_table_clusters = 1;
        } else {
            refcount_table_clusters = (refcount_table_clusters * 3 + 1) / 2;
        }
        new_table_size = refcount_table_clusters << (s->cluster_bits - 3);
        if (min_size <= new_table_size)
            break;
    }
#ifdef DEBUG_ALLOC2
    printf("grow_refcount_table from %d to %d\n",
           s->refcount_table_size,
           new_table_size);
#endif
    new_table_size2 = new_table_size * sizeof(uint64_t);
    new_table = qemu_mallocz(new_table_size2);
    memcpy(new_table, s->refcount_table,
           s->refcount_table_size * sizeof(uint64_t));
    for(i = 0; i < s->refcount_table_size; i++)
        cpu_to_be64s(&new_table[i]);
    /* Note: we cannot update the refcount now to avoid recursion */
    table_offset = alloc_clusters_noref(bs, new_table_size2);
    ret = bdrv_pwrite(s->hd, table_offset, new_table, new_table_size2);
    if (ret != new_table_size2)
        goto fail;
    for(i = 0; i < s->refcount_table_size; i++)
        be64_to_cpus(&new_table[i]);

    cpu_to_be64w((uint64_t*)data, table_offset);
    cpu_to_be32w((uint32_t*)(data + 8), refcount_table_clusters);
    ret = bdrv_pwrite(s->hd, offsetof(QCowHeader, refcount_table_offset),
                    data, sizeof(data));
    if (ret != sizeof(data)) {
        goto fail;
    }

    qemu_free(s->refcount_table);
    old_table_offset = s->refcount_table_offset;
    old_table_size = s->refcount_table_size;
    s->refcount_table = new_table;
    s->refcount_table_size = new_table_size;
    s->refcount_table_offset = table_offset;

    update_refcount(bs, table_offset, new_table_size2, 1);
    qcow2_free_clusters(bs, old_table_offset, old_table_size * sizeof(uint64_t));
    return 0;
 fail:
    qemu_free(new_table);
    return ret < 0 ? ret : -EIO;
}


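/*
 * Returns the offset of the refcount block that covers the given cluster
 * index, allocating and zeroing a new block (and growing the refcount table)
 * if necessary. Returns a negative errno on failure.
 */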
static int64_t alloc_refcount_block(BlockDriverState *bs, int64_t cluster_index)
{
    BDRVQcowState *s = bs->opaque;
    int64_t offset, refcount_block_offset;
    unsigned int refcount_table_index;
    int ret;
    uint64_t data64;
    int cache = cache_refcount_updates;

    /* Find the refcount table index and grow the refcount table if needed */
    refcount_table_index = cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT);
    if (refcount_table_index >= s->refcount_table_size) {
        ret = grow_refcount_table(bs, refcount_table_index + 1);
        if (ret < 0)
            return ret;
    }

    /* Load or allocate the refcount block */
    refcount_block_offset = s->refcount_table[refcount_table_index];
    if (!refcount_block_offset) {
        if (cache_refcount_updates) {
            write_refcount_block(s);
            cache_refcount_updates = 0;
        }
        /* create a new refcount block */
        /* Note: we cannot update the refcount now to avoid recursion */
        offset = alloc_clusters_noref(bs, s->cluster_size);
        memset(s->refcount_block_cache, 0, s->cluster_size);
        ret = bdrv_pwrite(s->hd, offset, s->refcount_block_cache, s->cluster_size);
        if (ret != s->cluster_size)
            return -EINVAL;
        s->refcount_table[refcount_table_index] = offset;
        data64 = cpu_to_be64(offset);
        ret = bdrv_pwrite(s->hd, s->refcount_table_offset +
                          refcount_table_index * sizeof(uint64_t),
                          &data64, sizeof(data64));
        if (ret != sizeof(data64))
            return -EINVAL;

        refcount_block_offset = offset;
        s->refcount_block_cache_offset = offset;
        update_refcount(bs, offset, s->cluster_size, 1);
        cache_refcount_updates = cache;
    } else {
        if (refcount_block_offset != s->refcount_block_cache_offset) {
            if (load_refcount_block(bs, refcount_block_offset) < 0)
                return -EIO;
        }
    }

    return refcount_block_offset;
}

#define REFCOUNTS_PER_SECTOR (512 >> REFCOUNT_SHIFT)
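/*
 * Writes the cached refcount block entries between first_index and last_index
 * (rounded out to sector boundaries) back to disk. Does nothing while refcount
 * updates are being cached.
 */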
static int write_refcount_block_entries(BDRVQcowState *s,
    int64_t refcount_block_offset, int first_index, int last_index)
{
    size_t size;

    if (cache_refcount_updates) {
        return 0;
    }

    first_index &= ~(REFCOUNTS_PER_SECTOR - 1);
    last_index = (last_index + REFCOUNTS_PER_SECTOR)
        & ~(REFCOUNTS_PER_SECTOR - 1);

    size = (last_index - first_index) << REFCOUNT_SHIFT;
    if (bdrv_pwrite(s->hd,
        refcount_block_offset + (first_index << REFCOUNT_SHIFT),
        &s->refcount_block_cache[first_index], size) != size)
    {
        return -EIO;
    }

    return 0;
}

/* XXX: cache several refcount block clusters ? */
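/*
 * Adds addend to the refcount of every cluster in the given byte range and
 * writes the modified refcount blocks back to disk. On error, updates that
 * were already applied are rolled back where possible. Returns 0 on success,
 * a negative errno otherwise.
 */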
static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
    int64_t offset, int64_t length, int addend)
{
    BDRVQcowState *s = bs->opaque;
    int64_t start, last, cluster_offset;
    int64_t refcount_block_offset = 0;
    int64_t table_index = -1, old_table_index;
    int first_index = -1, last_index = -1;
    int ret;

#ifdef DEBUG_ALLOC2
    printf("update_refcount: offset=%" PRId64 " size=%" PRId64 " addend=%d\n",
           offset, length, addend);
#endif
    if (length < 0) {
        return -EINVAL;
    } else if (length == 0) {
        return 0;
    }

    start = offset & ~(s->cluster_size - 1);
    last = (offset + length - 1) & ~(s->cluster_size - 1);
    for(cluster_offset = start; cluster_offset <= last;
        cluster_offset += s->cluster_size)
    {
        int block_index, refcount;
        int64_t cluster_index = cluster_offset >> s->cluster_bits;
        int64_t new_block;

        /* Only write refcount block to disk when we are done with it */
        old_table_index = table_index;
        table_index = cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT);
        if ((old_table_index >= 0) && (table_index != old_table_index)) {

            if (write_refcount_block_entries(s, refcount_block_offset,
                first_index, last_index) < 0)
            {
                return -EIO;
            }

            first_index = -1;
            last_index = -1;
        }

        /* Load the refcount block and allocate it if needed */
        new_block = alloc_refcount_block(bs, cluster_index);
        if (new_block < 0) {
            ret = new_block;
            goto fail;
        }
        refcount_block_offset = new_block;

        /* we can update the count and save it */
        block_index = cluster_index &
            ((1 << (s->cluster_bits - REFCOUNT_SHIFT)) - 1);
        if (first_index == -1 || block_index < first_index) {
            first_index = block_index;
        }
        if (block_index > last_index) {
            last_index = block_index;
        }

        refcount = be16_to_cpu(s->refcount_block_cache[block_index]);
        refcount += addend;
        if (refcount < 0 || refcount > 0xffff) {
            ret = -EINVAL;
            goto fail;
        }
        if (refcount == 0 && cluster_index < s->free_cluster_index) {
            s->free_cluster_index = cluster_index;
        }
        s->refcount_block_cache[block_index] = cpu_to_be16(refcount);
    }

    ret = 0;
fail:

    /* Write last changed block to disk */
    if (refcount_block_offset != 0) {
        if (write_refcount_block_entries(s, refcount_block_offset,
            first_index, last_index) < 0)
        {
            return ret < 0 ? ret : -EIO;
        }
    }

    /*
     * Try to undo any updates if an error is returned (This may succeed in
     * some cases like ENOSPC for allocating a new refcount block)
     */
    if (ret < 0) {
        int dummy;
        dummy = update_refcount(bs, offset, cluster_offset - offset, -addend);
    }

    return ret;
}

/* addend must be 1 or -1 */
static int update_cluster_refcount(BlockDriverState *bs,
                                   int64_t cluster_index,
                                   int addend)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = update_refcount(bs, cluster_index << s->cluster_bits, 1, addend);
    if (ret < 0) {
        return ret;
    }

    return get_refcount(bs, cluster_index);
}



/*********************************************************/
/* cluster allocation functions */



/* return < 0 if error */
static int64_t alloc_clusters_noref(BlockDriverState *bs, int64_t size)
{
    BDRVQcowState *s = bs->opaque;
    int i, nb_clusters;

    nb_clusters = size_to_clusters(s, size);
retry:
    for(i = 0; i < nb_clusters; i++) {
        int64_t i = s->free_cluster_index++;
        if (get_refcount(bs, i) != 0)
            goto retry;
    }
#ifdef DEBUG_ALLOC2
    printf("alloc_clusters: size=%" PRId64 " -> %" PRId64 "\n",
            size,
            (s->free_cluster_index - nb_clusters) << s->cluster_bits);
#endif
    return (s->free_cluster_index - nb_clusters) << s->cluster_bits;
}

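/*
 * Allocates enough clusters for size bytes, sets their refcount to 1 and
 * returns the offset of the first allocated cluster, or a negative errno on
 * failure.
 */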
int64_t qcow2_alloc_clusters(BlockDriverState *bs, int64_t size)
{
    int64_t offset;
    int ret;

    offset = alloc_clusters_noref(bs, size);
    ret = update_refcount(bs, offset, size, 1);
    if (ret < 0) {
        return ret;
    }
    return offset;
}

/* only used to allocate compressed sectors. We try to allocate
   contiguous sectors. size must be <= cluster_size */
int64_t qcow2_alloc_bytes(BlockDriverState *bs, int size)
{
    BDRVQcowState *s = bs->opaque;
    int64_t offset, cluster_offset;
    int free_in_cluster;

    assert(size > 0 && size <= s->cluster_size);
    if (s->free_byte_offset == 0) {
        s->free_byte_offset = qcow2_alloc_clusters(bs, s->cluster_size);
    }
 redo:
    free_in_cluster = s->cluster_size -
        (s->free_byte_offset & (s->cluster_size - 1));
    if (size <= free_in_cluster) {
        /* enough space in current cluster */
        offset = s->free_byte_offset;
        s->free_byte_offset += size;
        free_in_cluster -= size;
        if (free_in_cluster == 0)
            s->free_byte_offset = 0;
        if ((offset & (s->cluster_size - 1)) != 0)
            update_cluster_refcount(bs, offset >> s->cluster_bits, 1);
    } else {
        offset = qcow2_alloc_clusters(bs, s->cluster_size);
        cluster_offset = s->free_byte_offset & ~(s->cluster_size - 1);
        if ((cluster_offset + s->cluster_size) == offset) {
            /* we are lucky: contiguous data */
            offset = s->free_byte_offset;
            update_cluster_refcount(bs, offset >> s->cluster_bits, 1);
            s->free_byte_offset += size;
        } else {
            s->free_byte_offset = offset;
            goto redo;
        }
    }
    return offset;
}

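/*
 * Decrements the refcount of every cluster in the given byte range; aborts if
 * the refcount update fails.
 */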
void qcow2_free_clusters(BlockDriverState *bs,
                          int64_t offset, int64_t size)
{
    int ret;

    ret = update_refcount(bs, offset, size, -1);
    if (ret < 0) {
        fprintf(stderr, "qcow2_free_clusters failed: %s\n", strerror(-ret));
        abort();
    }
}

/*
 * free_any_clusters
 *
 * free clusters according to their type: compressed or not
 *
 */

void qcow2_free_any_clusters(BlockDriverState *bs,
    uint64_t cluster_offset, int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;

    /* free the cluster */

    if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
        int nb_csectors;
        nb_csectors = ((cluster_offset >> s->csize_shift) &
                       s->csize_mask) + 1;
        qcow2_free_clusters(bs,
            (cluster_offset & s->cluster_offset_mask) & ~511,
            nb_csectors * 512);
        return;
    }

    qcow2_free_clusters(bs, cluster_offset, nb_clusters << s->cluster_bits);

    return;
}



/*********************************************************/
/* snapshots and image creation */


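/*
 * Increments the in-memory refcounts for the given byte range while a new
 * image is being created; nothing is written to disk here.
 */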
void qcow2_create_refcount_update(QCowCreateState *s, int64_t offset,
    int64_t size)
{
    int refcount;
    int64_t start, last, cluster_offset;
    uint16_t *p;

    start = offset & ~(s->cluster_size - 1);
    last = (offset + size - 1) & ~(s->cluster_size - 1);
    for(cluster_offset = start; cluster_offset <= last;
        cluster_offset += s->cluster_size) {
        p = &s->refcount_block[cluster_offset >> s->cluster_bits];
        refcount = be16_to_cpu(*p);
        refcount++;
        *p = cpu_to_be16(refcount);
    }
}

/* update the refcounts of snapshots and the copied flag */
int qcow2_update_snapshot_refcount(BlockDriverState *bs,
    int64_t l1_table_offset, int l1_size, int addend)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table, *l2_table, l2_offset, offset, l1_size2, l1_allocated;
    int64_t old_offset, old_l2_offset;
    int l2_size, i, j, l1_modified, l2_modified, nb_csectors, refcount;

    qcow2_l2_cache_reset(bs);
    cache_refcount_updates = 1;

    l2_table = NULL;
    l1_table = NULL;
    l1_size2 = l1_size * sizeof(uint64_t);
    if (l1_table_offset != s->l1_table_offset) {
        if (l1_size2 != 0) {
            l1_table = qemu_mallocz(align_offset(l1_size2, 512));
        } else {
            l1_table = NULL;
        }
        l1_allocated = 1;
        if (bdrv_pread(s->hd, l1_table_offset,
                       l1_table, l1_size2) != l1_size2)
            goto fail;
        for(i = 0;i < l1_size; i++)
            be64_to_cpus(&l1_table[i]);
    } else {
        assert(l1_size == s->l1_size);
        l1_table = s->l1_table;
        l1_allocated = 0;
    }

    l2_size = s->l2_size * sizeof(uint64_t);
    l2_table = qemu_malloc(l2_size);
    l1_modified = 0;
    for(i = 0; i < l1_size; i++) {
        l2_offset = l1_table[i];
        if (l2_offset) {
            old_l2_offset = l2_offset;
            l2_offset &= ~QCOW_OFLAG_COPIED;
            l2_modified = 0;
            if (bdrv_pread(s->hd, l2_offset, l2_table, l2_size) != l2_size)
                goto fail;
            for(j = 0; j < s->l2_size; j++) {
                offset = be64_to_cpu(l2_table[j]);
                if (offset != 0) {
                    old_offset = offset;
                    offset &= ~QCOW_OFLAG_COPIED;
                    if (offset & QCOW_OFLAG_COMPRESSED) {
                        nb_csectors = ((offset >> s->csize_shift) &
                                       s->csize_mask) + 1;
                        if (addend != 0) {
                            int ret;
                            ret = update_refcount(bs,
                                (offset & s->cluster_offset_mask) & ~511,
                                nb_csectors * 512, addend);
                            if (ret < 0) {
                                goto fail;
                            }
                        }
                        /* compressed clusters are never modified */
                        refcount = 2;
                    } else {
                        if (addend != 0) {
                            refcount = update_cluster_refcount(bs, offset >> s->cluster_bits, addend);
                        } else {
                            refcount = get_refcount(bs, offset >> s->cluster_bits);
                        }
                    }

                    if (refcount == 1) {
                        offset |= QCOW_OFLAG_COPIED;
                    }
                    if (offset != old_offset) {
                        l2_table[j] = cpu_to_be64(offset);
                        l2_modified = 1;
                    }
                }
            }
            if (l2_modified) {
                if (bdrv_pwrite(s->hd,
                                l2_offset, l2_table, l2_size) != l2_size)
                    goto fail;
            }

            if (addend != 0) {
                refcount = update_cluster_refcount(bs, l2_offset >> s->cluster_bits, addend);
            } else {
                refcount = get_refcount(bs, l2_offset >> s->cluster_bits);
            }
            if (refcount == 1) {
                l2_offset |= QCOW_OFLAG_COPIED;
            }
            if (l2_offset != old_l2_offset) {
                l1_table[i] = l2_offset;
                l1_modified = 1;
            }
        }
    }
    if (l1_modified) {
        for(i = 0; i < l1_size; i++)
            cpu_to_be64s(&l1_table[i]);
        if (bdrv_pwrite(s->hd, l1_table_offset, l1_table,
                        l1_size2) != l1_size2)
            goto fail;
        for(i = 0; i < l1_size; i++)
            be64_to_cpus(&l1_table[i]);
    }
    if (l1_allocated)
        qemu_free(l1_table);
    qemu_free(l2_table);
    cache_refcount_updates = 0;
    write_refcount_block(s);
    return 0;
 fail:
    if (l1_allocated)
        qemu_free(l1_table);
    qemu_free(l2_table);
    cache_refcount_updates = 0;
    write_refcount_block(s);
    return -EIO;
}




/*********************************************************/
/* refcount checking functions */



/*
 * Increases the refcount for a range of clusters in a given refcount table.
 * This is used to construct a temporary refcount table out of L1 and L2 tables
 * which can be compared to the refcount table saved in the image.
 *
 * Returns the number of errors in the image that were found
 */
static int inc_refcounts(BlockDriverState *bs,
                          uint16_t *refcount_table,
                          int refcount_table_size,
                          int64_t offset, int64_t size)
{
    BDRVQcowState *s = bs->opaque;
    int64_t start, last, cluster_offset;
    int k;
    int errors = 0;

    if (size <= 0)
        return 0;

    start = offset & ~(s->cluster_size - 1);
    last = (offset + size - 1) & ~(s->cluster_size - 1);
    for(cluster_offset = start; cluster_offset <= last;
        cluster_offset += s->cluster_size) {
        k = cluster_offset >> s->cluster_bits;
        if (k < 0 || k >= refcount_table_size) {
            fprintf(stderr, "ERROR: invalid cluster offset=0x%" PRIx64 "\n",
                cluster_offset);
            errors++;
        } else {
            if (++refcount_table[k] == 0) {
                fprintf(stderr, "ERROR: overflow cluster offset=0x%" PRIx64
                    "\n", cluster_offset);
                errors++;
            }
        }
    }

    return errors;
}

/*
 * Increases the refcount in the given refcount table for all clusters
 * referenced in the L2 table. While doing so, performs some checks on L2
 * entries.
 *
 * Returns the number of errors found by the checks or -errno if an internal
 * error occurred.
 */
static int check_refcounts_l2(BlockDriverState *bs,
    uint16_t *refcount_table, int refcount_table_size, int64_t l2_offset,
    int check_copied)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table, offset;
    int i, l2_size, nb_csectors, refcount;
    int errors = 0;

    /* Read L2 table from disk */
    l2_size = s->l2_size * sizeof(uint64_t);
    l2_table = qemu_malloc(l2_size);

    if (bdrv_pread(s->hd, l2_offset, l2_table, l2_size) != l2_size)
        goto fail;

    /* Do the actual checks */
    for(i = 0; i < s->l2_size; i++) {
        offset = be64_to_cpu(l2_table[i]);
        if (offset != 0) {
            if (offset & QCOW_OFLAG_COMPRESSED) {
                /* Compressed clusters don't have QCOW_OFLAG_COPIED */
                if (offset & QCOW_OFLAG_COPIED) {
                    fprintf(stderr, "ERROR: cluster %" PRId64 ": "
                        "copied flag must never be set for compressed "
                        "clusters\n", offset >> s->cluster_bits);
                    offset &= ~QCOW_OFLAG_COPIED;
                    errors++;
                }

                /* Mark cluster as used */
                nb_csectors = ((offset >> s->csize_shift) &
                               s->csize_mask) + 1;
                offset &= s->cluster_offset_mask;
                errors += inc_refcounts(bs, refcount_table,
                              refcount_table_size,
                              offset & ~511, nb_csectors * 512);
            } else {
                /* QCOW_OFLAG_COPIED must be set iff refcount == 1 */
                if (check_copied) {
                    uint64_t entry = offset;
                    offset &= ~QCOW_OFLAG_COPIED;
                    refcount = get_refcount(bs, offset >> s->cluster_bits);
                    if ((refcount == 1) != ((entry & QCOW_OFLAG_COPIED) != 0)) {
                        fprintf(stderr, "ERROR OFLAG_COPIED: offset=%"
                            PRIx64 " refcount=%d\n", entry, refcount);
                        errors++;
                    }
                }

                /* Mark cluster as used */
                offset &= ~QCOW_OFLAG_COPIED;
                errors += inc_refcounts(bs, refcount_table,
                              refcount_table_size,
                              offset, s->cluster_size);

                /* Correct offsets are cluster aligned */
                if (offset & (s->cluster_size - 1)) {
                    fprintf(stderr, "ERROR offset=%" PRIx64 ": Cluster is not "
                        "properly aligned; L2 entry corrupted.\n", offset);
                    errors++;
                }
            }
        }
    }

    qemu_free(l2_table);
    return errors;

fail:
    fprintf(stderr, "ERROR: I/O error in check_refcounts_l2\n");
    qemu_free(l2_table);
    return -EIO;
}

/*
 * Increases the refcount for the L1 table, its L2 tables and all referenced
 * clusters in the given refcount table. While doing so, performs some checks
 * on L1 and L2 entries.
 *
 * Returns the number of errors found by the checks or -errno if an internal
 * error occurred.
 */
static int check_refcounts_l1(BlockDriverState *bs,
                              uint16_t *refcount_table,
                              int refcount_table_size,
                              int64_t l1_table_offset, int l1_size,
                              int check_copied)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table, l2_offset, l1_size2;
    int i, refcount, ret;
    int errors = 0;

    l1_size2 = l1_size * sizeof(uint64_t);

    /* Mark L1 table as used */
    errors += inc_refcounts(bs, refcount_table, refcount_table_size,
                  l1_table_offset, l1_size2);

    /* Read L1 table entries from disk */
    if (l1_size2 == 0) {
        l1_table = NULL;
    } else {
        l1_table = qemu_malloc(l1_size2);
        if (bdrv_pread(s->hd, l1_table_offset,
                       l1_table, l1_size2) != l1_size2)
            goto fail;
        for(i = 0;i < l1_size; i++)
            be64_to_cpus(&l1_table[i]);
    }

    /* Do the actual checks */
    for(i = 0; i < l1_size; i++) {
        l2_offset = l1_table[i];
        if (l2_offset) {
            /* QCOW_OFLAG_COPIED must be set iff refcount == 1 */
            if (check_copied) {
                refcount = get_refcount(bs, (l2_offset & ~QCOW_OFLAG_COPIED)
                    >> s->cluster_bits);
                if ((refcount == 1) != ((l2_offset & QCOW_OFLAG_COPIED) != 0)) {
                    fprintf(stderr, "ERROR OFLAG_COPIED: l2_offset=%" PRIx64
                        " refcount=%d\n", l2_offset, refcount);
                    errors++;
                }
            }

            /* Mark L2 table as used */
            l2_offset &= ~QCOW_OFLAG_COPIED;
            errors += inc_refcounts(bs, refcount_table,
                          refcount_table_size,
                          l2_offset,
                          s->cluster_size);

            /* L2 tables are cluster aligned */
            if (l2_offset & (s->cluster_size - 1)) {
                fprintf(stderr, "ERROR l2_offset=%" PRIx64 ": Table is not "
                    "cluster aligned; L1 entry corrupted\n", l2_offset);
                errors++;
            }

            /* Process and check L2 entries */
            ret = check_refcounts_l2(bs, refcount_table, refcount_table_size,
                l2_offset, check_copied);
            if (ret < 0) {
                goto fail;
            }
            errors += ret;
        }
    }
    qemu_free(l1_table);
    return errors;

fail:
    fprintf(stderr, "ERROR: I/O error in check_refcounts_l1\n");
    qemu_free(l1_table);
    return -EIO;
}

/*
 * Checks an image for refcount consistency.
 *
 * Returns 0 if no errors are found, the number of errors in case the image is
 * detected as corrupted, and -errno when an internal error occurred.
 */
int qcow2_check_refcounts(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    int64_t size;
    int nb_clusters, refcount1, refcount2, i;
    QCowSnapshot *sn;
    uint16_t *refcount_table;
    int ret, errors = 0;

    size = bdrv_getlength(s->hd);
    nb_clusters = size_to_clusters(s, size);
    refcount_table = qemu_mallocz(nb_clusters * sizeof(uint16_t));

    /* header */
    errors += inc_refcounts(bs, refcount_table, nb_clusters,
                  0, s->cluster_size);

    /* current L1 table */
    ret = check_refcounts_l1(bs, refcount_table, nb_clusters,
                       s->l1_table_offset, s->l1_size, 1);
    if (ret < 0) {
        return ret;
    }
    errors += ret;

    /* snapshots */
    for(i = 0; i < s->nb_snapshots; i++) {
        sn = s->snapshots + i;
        check_refcounts_l1(bs, refcount_table, nb_clusters,
                           sn->l1_table_offset, sn->l1_size, 0);
    }
    errors += inc_refcounts(bs, refcount_table, nb_clusters,
                  s->snapshots_offset, s->snapshots_size);

    /* refcount data */
    errors += inc_refcounts(bs, refcount_table, nb_clusters,
                  s->refcount_table_offset,
                  s->refcount_table_size * sizeof(uint64_t));
    for(i = 0; i < s->refcount_table_size; i++) {
        int64_t offset;
        offset = s->refcount_table[i];
        if (offset != 0) {
            errors += inc_refcounts(bs, refcount_table, nb_clusters,
                          offset, s->cluster_size);
        }
    }

    /* compare ref counts */
    for(i = 0; i < nb_clusters; i++) {
        refcount1 = get_refcount(bs, i);
        refcount2 = refcount_table[i];
        if (refcount1 != refcount2) {
            fprintf(stderr, "ERROR cluster %d refcount=%d reference=%d\n",
                   i, refcount1, refcount2);
            errors++;
        }
    }

    qemu_free(refcount_table);

    return errors;
}