block/qcow2-refcount.c @ 5d757b56

/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu-common.h"
#include "block_int.h"
#include "block/qcow2.h"

static int64_t alloc_clusters_noref(BlockDriverState *bs, int64_t size);
static int update_refcount(BlockDriverState *bs,
                            int64_t offset, int64_t length,
                            int addend);

static int cache_refcount_updates = 0;

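/* Write the currently cached refcount block back to the image file.
 * Returns 0 on success (also when no block is cached) and -EIO on write
 * failure. */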
static int write_refcount_block(BDRVQcowState *s)
{
    size_t size = s->cluster_size;

    if (s->refcount_block_cache_offset == 0) {
        return 0;
    }

    if (bdrv_pwrite(s->hd, s->refcount_block_cache_offset,
            s->refcount_block_cache, size) != size)
    {
        return -EIO;
    }

    return 0;
}

/*********************************************************/
/* refcount handling */

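/* Allocate the refcount block cache and read the refcount table from the
 * image into memory, converting its entries to host byte order. */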
int qcow2_refcount_init(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    int ret, refcount_table_size2, i;

    s->refcount_block_cache = qemu_malloc(s->cluster_size);
    refcount_table_size2 = s->refcount_table_size * sizeof(uint64_t);
    s->refcount_table = qemu_malloc(refcount_table_size2);
    if (s->refcount_table_size > 0) {
        ret = bdrv_pread(s->hd, s->refcount_table_offset,
                         s->refcount_table, refcount_table_size2);
        if (ret != refcount_table_size2)
            goto fail;
        for(i = 0; i < s->refcount_table_size; i++)
            be64_to_cpus(&s->refcount_table[i]);
    }
    return 0;
 fail:
    return -ENOMEM;
}

void qcow2_refcount_close(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    qemu_free(s->refcount_block_cache);
    qemu_free(s->refcount_table);
}

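/* Read the refcount block at refcount_block_offset into the cache, flushing
 * any cached updates for the previously loaded block first. */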
static int load_refcount_block(BlockDriverState *bs,
                               int64_t refcount_block_offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    if (cache_refcount_updates) {
        write_refcount_block(s);
    }

    ret = bdrv_pread(s->hd, refcount_block_offset, s->refcount_block_cache,
                     s->cluster_size);
    if (ret != s->cluster_size)
        return -EIO;
    s->refcount_block_cache_offset = refcount_block_offset;
    return 0;
}

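/* Return the refcount of the cluster with the given index, or 0 if the
 * cluster is not covered by the refcount table or has no refcount block. */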
static int get_refcount(BlockDriverState *bs, int64_t cluster_index)
{
    BDRVQcowState *s = bs->opaque;
    int refcount_table_index, block_index;
    int64_t refcount_block_offset;

    refcount_table_index = cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT);
    if (refcount_table_index >= s->refcount_table_size)
        return 0;
    refcount_block_offset = s->refcount_table[refcount_table_index];
    if (!refcount_block_offset)
        return 0;
    if (refcount_block_offset != s->refcount_block_cache_offset) {
        /* better than nothing: return allocated if read error */
        if (load_refcount_block(bs, refcount_block_offset) < 0)
            return 1;
    }
    block_index = cluster_index &
        ((1 << (s->cluster_bits - REFCOUNT_SHIFT)) - 1);
    return be16_to_cpu(s->refcount_block_cache[block_index]);
}

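/* Grow the refcount table so that it has at least min_size entries: write a
 * larger copy of the table to newly allocated clusters, point the header at
 * it and free the old table. Each iteration grows the table by about 50%. */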
static int grow_refcount_table(BlockDriverState *bs, int min_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_table_size, new_table_size2, refcount_table_clusters, i, ret;
    uint64_t *new_table;
    int64_t table_offset;
    uint8_t data[12];
    int old_table_size;
    int64_t old_table_offset;

    if (min_size <= s->refcount_table_size)
        return 0;
    /* compute new table size */
    refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3);
    for(;;) {
        if (refcount_table_clusters == 0) {
            refcount_table_clusters = 1;
        } else {
            refcount_table_clusters = (refcount_table_clusters * 3 + 1) / 2;
        }
        new_table_size = refcount_table_clusters << (s->cluster_bits - 3);
        if (min_size <= new_table_size)
            break;
    }
#ifdef DEBUG_ALLOC2
    printf("grow_refcount_table from %d to %d\n",
           s->refcount_table_size,
           new_table_size);
#endif
    new_table_size2 = new_table_size * sizeof(uint64_t);
    new_table = qemu_mallocz(new_table_size2);
    memcpy(new_table, s->refcount_table,
           s->refcount_table_size * sizeof(uint64_t));
    for(i = 0; i < s->refcount_table_size; i++)
        cpu_to_be64s(&new_table[i]);
    /* Note: we cannot update the refcount now to avoid recursion */
    table_offset = alloc_clusters_noref(bs, new_table_size2);
    ret = bdrv_pwrite(s->hd, table_offset, new_table, new_table_size2);
    if (ret != new_table_size2)
        goto fail;
    for(i = 0; i < s->refcount_table_size; i++)
        be64_to_cpus(&new_table[i]);

    cpu_to_be64w((uint64_t*)data, table_offset);
    cpu_to_be32w((uint32_t*)(data + 8), refcount_table_clusters);
    ret = bdrv_pwrite(s->hd, offsetof(QCowHeader, refcount_table_offset),
                    data, sizeof(data));
    if (ret != sizeof(data)) {
        goto fail;
    }

    qemu_free(s->refcount_table);
    old_table_offset = s->refcount_table_offset;
    old_table_size = s->refcount_table_size;
    s->refcount_table = new_table;
    s->refcount_table_size = new_table_size;
    s->refcount_table_offset = table_offset;

    update_refcount(bs, table_offset, new_table_size2, 1);
    qcow2_free_clusters(bs, old_table_offset, old_table_size * sizeof(uint64_t));
    return 0;
 fail:
    qemu_free(new_table);
    return ret < 0 ? ret : -EIO;
}

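/* Return the offset of the refcount block covering cluster_index, growing
 * the refcount table and allocating a new (zero-filled) refcount block if
 * necessary. The block ends up loaded in the refcount block cache. */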
static int64_t alloc_refcount_block(BlockDriverState *bs, int64_t cluster_index)
{
    BDRVQcowState *s = bs->opaque;
    int64_t offset, refcount_block_offset;
    unsigned int refcount_table_index;
    int ret;
    uint64_t data64;
    int cache = cache_refcount_updates;

    /* Find the refcount table index and grow the refcount table if needed */
    refcount_table_index = cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT);
    if (refcount_table_index >= s->refcount_table_size) {
        ret = grow_refcount_table(bs, refcount_table_index + 1);
        if (ret < 0)
            return ret;
    }

    /* Load or allocate the refcount block */
    refcount_block_offset = s->refcount_table[refcount_table_index];
    if (!refcount_block_offset) {
        if (cache_refcount_updates) {
            write_refcount_block(s);
            cache_refcount_updates = 0;
        }
        /* create a new refcount block */
        /* Note: we cannot update the refcount now to avoid recursion */
        offset = alloc_clusters_noref(bs, s->cluster_size);
        memset(s->refcount_block_cache, 0, s->cluster_size);
        ret = bdrv_pwrite(s->hd, offset, s->refcount_block_cache, s->cluster_size);
        if (ret != s->cluster_size)
            return -EINVAL;
        s->refcount_table[refcount_table_index] = offset;
        data64 = cpu_to_be64(offset);
        ret = bdrv_pwrite(s->hd, s->refcount_table_offset +
                          refcount_table_index * sizeof(uint64_t),
                          &data64, sizeof(data64));
        if (ret != sizeof(data64))
            return -EINVAL;

        refcount_block_offset = offset;
        s->refcount_block_cache_offset = offset;
        update_refcount(bs, offset, s->cluster_size, 1);
        cache_refcount_updates = cache;
    } else {
        if (refcount_block_offset != s->refcount_block_cache_offset) {
            if (load_refcount_block(bs, refcount_block_offset) < 0)
                return -EIO;
        }
    }

    return refcount_block_offset;
}

#define REFCOUNTS_PER_SECTOR (512 >> REFCOUNT_SHIFT)
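/* Write the cached refcount block entries between first_index and last_index
 * back to disk, rounding the range out to whole 512-byte sectors. Skipped
 * while cache_refcount_updates is set. */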
static int write_refcount_block_entries(BDRVQcowState *s,
    int64_t refcount_block_offset, int first_index, int last_index)
{
    size_t size;

    if (cache_refcount_updates) {
        return 0;
    }

    first_index &= ~(REFCOUNTS_PER_SECTOR - 1);
    last_index = (last_index + REFCOUNTS_PER_SECTOR)
        & ~(REFCOUNTS_PER_SECTOR - 1);

    size = (last_index - first_index) << REFCOUNT_SHIFT;
    if (bdrv_pwrite(s->hd,
        refcount_block_offset + (first_index << REFCOUNT_SHIFT),
        &s->refcount_block_cache[first_index], size) != size)
    {
        return -EIO;
    }

    return 0;
}

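/* Adjust the refcount of every cluster in the range [offset, offset + length)
 * by addend, allocating refcount blocks as needed. On failure an attempt is
 * made to undo the updates that were already applied. */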
/* XXX: cache several refcount block clusters ? */
static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
    int64_t offset, int64_t length, int addend)
{
    BDRVQcowState *s = bs->opaque;
    int64_t start, last, cluster_offset;
    int64_t refcount_block_offset = 0;
    int64_t table_index = -1, old_table_index;
    int first_index = -1, last_index = -1;
    int ret;

#ifdef DEBUG_ALLOC2
    printf("update_refcount: offset=%" PRId64 " size=%" PRId64 " addend=%d\n",
           offset, length, addend);
#endif
    if (length < 0) {
        return -EINVAL;
    } else if (length == 0) {
        return 0;
    }

    start = offset & ~(s->cluster_size - 1);
    last = (offset + length - 1) & ~(s->cluster_size - 1);
    for(cluster_offset = start; cluster_offset <= last;
        cluster_offset += s->cluster_size)
    {
        int block_index, refcount;
        int64_t cluster_index = cluster_offset >> s->cluster_bits;
        int64_t new_block;

        /* Only write refcount block to disk when we are done with it */
        old_table_index = table_index;
        table_index = cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT);
        if ((old_table_index >= 0) && (table_index != old_table_index)) {

            if (write_refcount_block_entries(s, refcount_block_offset,
                first_index, last_index) < 0)
            {
                return -EIO;
            }

            first_index = -1;
            last_index = -1;
        }

        /* Load the refcount block and allocate it if needed */
        new_block = alloc_refcount_block(bs, cluster_index);
        if (new_block < 0) {
            ret = new_block;
            goto fail;
        }
        refcount_block_offset = new_block;

        /* we can update the count and save it */
        block_index = cluster_index &
            ((1 << (s->cluster_bits - REFCOUNT_SHIFT)) - 1);
        if (first_index == -1 || block_index < first_index) {
            first_index = block_index;
        }
        if (block_index > last_index) {
            last_index = block_index;
        }

        refcount = be16_to_cpu(s->refcount_block_cache[block_index]);
        refcount += addend;
        if (refcount < 0 || refcount > 0xffff) {
            ret = -EINVAL;
            goto fail;
        }
        if (refcount == 0 && cluster_index < s->free_cluster_index) {
            s->free_cluster_index = cluster_index;
        }
        s->refcount_block_cache[block_index] = cpu_to_be16(refcount);
    }

    ret = 0;
fail:

    /* Write last changed block to disk */
    if (refcount_block_offset != 0) {
        if (write_refcount_block_entries(s, refcount_block_offset,
            first_index, last_index) < 0)
        {
            return ret < 0 ? ret : -EIO;
        }
    }

    /*
     * Try to undo any updates if an error is returned (This may succeed in
     * some cases like ENOSPC for allocating a new refcount block)
     */
    if (ret < 0) {
        int dummy;
        dummy = update_refcount(bs, offset, cluster_offset - offset, -addend);
    }

    return ret;
}

/* addend must be 1 or -1 */
372
                                   int64_t cluster_index,
373
                                   int addend)
374
{
375
    BDRVQcowState *s = bs->opaque;
376
    int ret;
377

    
378
    ret = update_refcount(bs, cluster_index << s->cluster_bits, 1, addend);
379
    if (ret < 0) {
380
        return ret;
381
    }
382

    
383
    return get_refcount(bs, cluster_index);
384
}
385

    
386

    
387

    
388
/*********************************************************/
389
/* cluster allocation functions */
390

    
391

    
392

    
393
/* return < 0 if error */
394
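/* Scan forward from free_cluster_index for a run of clusters with refcount 0
 * that is large enough for size bytes; no refcounts are updated. */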
static int64_t alloc_clusters_noref(BlockDriverState *bs, int64_t size)
{
    BDRVQcowState *s = bs->opaque;
    int i, nb_clusters;

    nb_clusters = size_to_clusters(s, size);
retry:
    for(i = 0; i < nb_clusters; i++) {
        int64_t i = s->free_cluster_index++;
        if (get_refcount(bs, i) != 0)
            goto retry;
    }
#ifdef DEBUG_ALLOC2
    printf("alloc_clusters: size=%" PRId64 " -> %" PRId64 "\n",
            size,
            (s->free_cluster_index - nb_clusters) << s->cluster_bits);
#endif
    return (s->free_cluster_index - nb_clusters) << s->cluster_bits;
}

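/* Allocate enough clusters for size bytes, set their refcount to 1 and
 * return the offset of the first cluster (negative errno on failure). */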
int64_t qcow2_alloc_clusters(BlockDriverState *bs, int64_t size)
{
    int64_t offset;
    int ret;

    offset = alloc_clusters_noref(bs, size);
    ret = update_refcount(bs, offset, size, 1);
    if (ret < 0) {
        return ret;
    }
    return offset;
}

/* only used to allocate compressed sectors. We try to allocate
   contiguous sectors. size must be <= cluster_size */
int64_t qcow2_alloc_bytes(BlockDriverState *bs, int size)
{
    BDRVQcowState *s = bs->opaque;
    int64_t offset, cluster_offset;
    int free_in_cluster;

    assert(size > 0 && size <= s->cluster_size);
    if (s->free_byte_offset == 0) {
        s->free_byte_offset = qcow2_alloc_clusters(bs, s->cluster_size);
        if (s->free_byte_offset < 0) {
            return s->free_byte_offset;
        }
    }
 redo:
    free_in_cluster = s->cluster_size -
        (s->free_byte_offset & (s->cluster_size - 1));
    if (size <= free_in_cluster) {
        /* enough space in current cluster */
        offset = s->free_byte_offset;
        s->free_byte_offset += size;
        free_in_cluster -= size;
        if (free_in_cluster == 0)
            s->free_byte_offset = 0;
        if ((offset & (s->cluster_size - 1)) != 0)
            update_cluster_refcount(bs, offset >> s->cluster_bits, 1);
    } else {
        offset = qcow2_alloc_clusters(bs, s->cluster_size);
        if (offset < 0) {
            return offset;
        }
        cluster_offset = s->free_byte_offset & ~(s->cluster_size - 1);
        if ((cluster_offset + s->cluster_size) == offset) {
            /* we are lucky: contiguous data */
            offset = s->free_byte_offset;
            update_cluster_refcount(bs, offset >> s->cluster_bits, 1);
            s->free_byte_offset += size;
        } else {
            s->free_byte_offset = offset;
            goto redo;
        }
    }
    return offset;
}

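/* Decrement the refcount of all clusters in the given range. A failure here
 * would leave the image inconsistent, so it is treated as fatal. */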
void qcow2_free_clusters(BlockDriverState *bs,
                          int64_t offset, int64_t size)
{
    int ret;

    ret = update_refcount(bs, offset, size, -1);
    if (ret < 0) {
        fprintf(stderr, "qcow2_free_clusters failed: %s\n", strerror(-ret));
        abort();
    }
}

/*
 * free_any_clusters
 *
 * free clusters according to their type: compressed or not
 *
 */

void qcow2_free_any_clusters(BlockDriverState *bs,
    uint64_t cluster_offset, int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;

    /* free the cluster */

    if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
        int nb_csectors;
        nb_csectors = ((cluster_offset >> s->csize_shift) &
                       s->csize_mask) + 1;
        qcow2_free_clusters(bs,
            (cluster_offset & s->cluster_offset_mask) & ~511,
            nb_csectors * 512);
        return;
    }

    qcow2_free_clusters(bs, cluster_offset, nb_clusters << s->cluster_bits);

    return;
}

/*********************************************************/
/* snapshots and image creation */

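/* Used while creating a new image: bump the refcounts in the creation-time
 * refcount block (QCowCreateState) for every cluster touched by the given
 * byte range. */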
void qcow2_create_refcount_update(QCowCreateState *s, int64_t offset,
    int64_t size)
{
    int refcount;
    int64_t start, last, cluster_offset;
    uint16_t *p;

    start = offset & ~(s->cluster_size - 1);
    last = (offset + size - 1) & ~(s->cluster_size - 1);
    for(cluster_offset = start; cluster_offset <= last;
        cluster_offset += s->cluster_size) {
        p = &s->refcount_block[cluster_offset >> s->cluster_bits];
        refcount = be16_to_cpu(*p);
        refcount++;
        *p = cpu_to_be16(refcount);
    }
}

/* update the refcounts of snapshots and the copied flag */
int qcow2_update_snapshot_refcount(BlockDriverState *bs,
    int64_t l1_table_offset, int l1_size, int addend)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table, *l2_table, l2_offset, offset, l1_size2, l1_allocated;
    int64_t old_offset, old_l2_offset;
    int l2_size, i, j, l1_modified, l2_modified, nb_csectors, refcount;

    qcow2_l2_cache_reset(bs);
    cache_refcount_updates = 1;

    l2_table = NULL;
    l1_table = NULL;
    l1_size2 = l1_size * sizeof(uint64_t);
    if (l1_table_offset != s->l1_table_offset) {
        if (l1_size2 != 0) {
            l1_table = qemu_mallocz(align_offset(l1_size2, 512));
        } else {
            l1_table = NULL;
        }
        l1_allocated = 1;
        if (bdrv_pread(s->hd, l1_table_offset,
                       l1_table, l1_size2) != l1_size2)
            goto fail;
        for(i = 0;i < l1_size; i++)
            be64_to_cpus(&l1_table[i]);
    } else {
        assert(l1_size == s->l1_size);
        l1_table = s->l1_table;
        l1_allocated = 0;
    }

    l2_size = s->l2_size * sizeof(uint64_t);
    l2_table = qemu_malloc(l2_size);
    l1_modified = 0;
    for(i = 0; i < l1_size; i++) {
        l2_offset = l1_table[i];
        if (l2_offset) {
            old_l2_offset = l2_offset;
            l2_offset &= ~QCOW_OFLAG_COPIED;
            l2_modified = 0;
            if (bdrv_pread(s->hd, l2_offset, l2_table, l2_size) != l2_size)
                goto fail;
            for(j = 0; j < s->l2_size; j++) {
                offset = be64_to_cpu(l2_table[j]);
                if (offset != 0) {
                    old_offset = offset;
                    offset &= ~QCOW_OFLAG_COPIED;
                    if (offset & QCOW_OFLAG_COMPRESSED) {
                        nb_csectors = ((offset >> s->csize_shift) &
                                       s->csize_mask) + 1;
                        if (addend != 0) {
                            int ret;
                            ret = update_refcount(bs,
                                (offset & s->cluster_offset_mask) & ~511,
                                nb_csectors * 512, addend);
                            if (ret < 0) {
                                goto fail;
                            }
                        }
                        /* compressed clusters are never modified */
                        refcount = 2;
                    } else {
                        if (addend != 0) {
                            refcount = update_cluster_refcount(bs, offset >> s->cluster_bits, addend);
                        } else {
                            refcount = get_refcount(bs, offset >> s->cluster_bits);
                        }
                    }

                    if (refcount == 1) {
                        offset |= QCOW_OFLAG_COPIED;
                    }
                    if (offset != old_offset) {
                        l2_table[j] = cpu_to_be64(offset);
                        l2_modified = 1;
                    }
                }
            }
            if (l2_modified) {
                if (bdrv_pwrite(s->hd,
                                l2_offset, l2_table, l2_size) != l2_size)
                    goto fail;
            }

            if (addend != 0) {
                refcount = update_cluster_refcount(bs, l2_offset >> s->cluster_bits, addend);
            } else {
                refcount = get_refcount(bs, l2_offset >> s->cluster_bits);
            }
            if (refcount == 1) {
                l2_offset |= QCOW_OFLAG_COPIED;
            }
            if (l2_offset != old_l2_offset) {
                l1_table[i] = l2_offset;
                l1_modified = 1;
            }
        }
    }
    if (l1_modified) {
        for(i = 0; i < l1_size; i++)
            cpu_to_be64s(&l1_table[i]);
        if (bdrv_pwrite(s->hd, l1_table_offset, l1_table,
                        l1_size2) != l1_size2)
            goto fail;
        for(i = 0; i < l1_size; i++)
            be64_to_cpus(&l1_table[i]);
    }
    if (l1_allocated)
        qemu_free(l1_table);
    qemu_free(l2_table);
    cache_refcount_updates = 0;
    write_refcount_block(s);
    return 0;
 fail:
    if (l1_allocated)
        qemu_free(l1_table);
    qemu_free(l2_table);
    cache_refcount_updates = 0;
    write_refcount_block(s);
    return -EIO;
}

/*********************************************************/
/* refcount checking functions */

/*
 * Increases the refcount for a range of clusters in a given refcount table.
 * This is used to construct a temporary refcount table out of L1 and L2 tables
 * which can be compared with the refcount table saved in the image.
 *
 * Returns the number of errors in the image that were found
 */
static int inc_refcounts(BlockDriverState *bs,
                          uint16_t *refcount_table,
                          int refcount_table_size,
                          int64_t offset, int64_t size)
{
    BDRVQcowState *s = bs->opaque;
    int64_t start, last, cluster_offset;
    int k;
    int errors = 0;

    if (size <= 0)
        return 0;

    start = offset & ~(s->cluster_size - 1);
    last = (offset + size - 1) & ~(s->cluster_size - 1);
    for(cluster_offset = start; cluster_offset <= last;
        cluster_offset += s->cluster_size) {
        k = cluster_offset >> s->cluster_bits;
        if (k < 0 || k >= refcount_table_size) {
            fprintf(stderr, "ERROR: invalid cluster offset=0x%" PRIx64 "\n",
                cluster_offset);
            errors++;
        } else {
            if (++refcount_table[k] == 0) {
                fprintf(stderr, "ERROR: overflow cluster offset=0x%" PRIx64
                    "\n", cluster_offset);
                errors++;
            }
        }
    }

    return errors;
}

/*
 * Increases the refcount in the given refcount table for all clusters
 * referenced in the L2 table. While doing so, performs some checks on L2
 * entries.
 *
 * Returns the number of errors found by the checks or -errno if an internal
 * error occurred.
 */
static int check_refcounts_l2(BlockDriverState *bs,
    uint16_t *refcount_table, int refcount_table_size, int64_t l2_offset,
    int check_copied)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table, offset;
    int i, l2_size, nb_csectors, refcount;
    int errors = 0;

    /* Read L2 table from disk */
    l2_size = s->l2_size * sizeof(uint64_t);
    l2_table = qemu_malloc(l2_size);

    if (bdrv_pread(s->hd, l2_offset, l2_table, l2_size) != l2_size)
        goto fail;

    /* Do the actual checks */
    for(i = 0; i < s->l2_size; i++) {
        offset = be64_to_cpu(l2_table[i]);
        if (offset != 0) {
            if (offset & QCOW_OFLAG_COMPRESSED) {
                /* Compressed clusters don't have QCOW_OFLAG_COPIED */
                if (offset & QCOW_OFLAG_COPIED) {
                    fprintf(stderr, "ERROR: cluster %" PRId64 ": "
                        "copied flag must never be set for compressed "
                        "clusters\n", offset >> s->cluster_bits);
                    offset &= ~QCOW_OFLAG_COPIED;
                    errors++;
                }

                /* Mark cluster as used */
                nb_csectors = ((offset >> s->csize_shift) &
                               s->csize_mask) + 1;
                offset &= s->cluster_offset_mask;
                errors += inc_refcounts(bs, refcount_table,
                              refcount_table_size,
                              offset & ~511, nb_csectors * 512);
            } else {
                /* QCOW_OFLAG_COPIED must be set iff refcount == 1 */
                if (check_copied) {
                    uint64_t entry = offset;
                    offset &= ~QCOW_OFLAG_COPIED;
                    refcount = get_refcount(bs, offset >> s->cluster_bits);
                    if ((refcount == 1) != ((entry & QCOW_OFLAG_COPIED) != 0)) {
                        fprintf(stderr, "ERROR OFLAG_COPIED: offset=%"
                            PRIx64 " refcount=%d\n", entry, refcount);
                        errors++;
                    }
                }

                /* Mark cluster as used */
                offset &= ~QCOW_OFLAG_COPIED;
                errors += inc_refcounts(bs, refcount_table,
                              refcount_table_size,
                              offset, s->cluster_size);

                /* Correct offsets are cluster aligned */
                if (offset & (s->cluster_size - 1)) {
                    fprintf(stderr, "ERROR offset=%" PRIx64 ": Cluster is not "
                        "properly aligned; L2 entry corrupted.\n", offset);
                    errors++;
                }
            }
        }
    }

    qemu_free(l2_table);
    return errors;

fail:
    fprintf(stderr, "ERROR: I/O error in check_refcounts_l2\n");
    qemu_free(l2_table);
    return -EIO;
}

/*
 * Increases the refcount for the L1 table, its L2 tables and all referenced
 * clusters in the given refcount table. While doing so, performs some checks
 * on L1 and L2 entries.
 *
 * Returns the number of errors found by the checks or -errno if an internal
 * error occurred.
 */
static int check_refcounts_l1(BlockDriverState *bs,
                              uint16_t *refcount_table,
                              int refcount_table_size,
                              int64_t l1_table_offset, int l1_size,
                              int check_copied)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table, l2_offset, l1_size2;
    int i, refcount, ret;
    int errors = 0;

    l1_size2 = l1_size * sizeof(uint64_t);

    /* Mark L1 table as used */
    errors += inc_refcounts(bs, refcount_table, refcount_table_size,
                  l1_table_offset, l1_size2);

    /* Read L1 table entries from disk */
    if (l1_size2 == 0) {
        l1_table = NULL;
    } else {
        l1_table = qemu_malloc(l1_size2);
        if (bdrv_pread(s->hd, l1_table_offset,
                       l1_table, l1_size2) != l1_size2)
            goto fail;
        for(i = 0;i < l1_size; i++)
            be64_to_cpus(&l1_table[i]);
    }

    /* Do the actual checks */
    for(i = 0; i < l1_size; i++) {
        l2_offset = l1_table[i];
        if (l2_offset) {
            /* QCOW_OFLAG_COPIED must be set iff refcount == 1 */
            if (check_copied) {
                refcount = get_refcount(bs, (l2_offset & ~QCOW_OFLAG_COPIED)
                    >> s->cluster_bits);
                if ((refcount == 1) != ((l2_offset & QCOW_OFLAG_COPIED) != 0)) {
                    fprintf(stderr, "ERROR OFLAG_COPIED: l2_offset=%" PRIx64
                        " refcount=%d\n", l2_offset, refcount);
                    errors++;
                }
            }

            /* Mark L2 table as used */
            l2_offset &= ~QCOW_OFLAG_COPIED;
            errors += inc_refcounts(bs, refcount_table,
                          refcount_table_size,
                          l2_offset,
                          s->cluster_size);

            /* L2 tables are cluster aligned */
            if (l2_offset & (s->cluster_size - 1)) {
                fprintf(stderr, "ERROR l2_offset=%" PRIx64 ": Table is not "
                    "cluster aligned; L1 entry corrupted\n", l2_offset);
                errors++;
            }

            /* Process and check L2 entries */
            ret = check_refcounts_l2(bs, refcount_table, refcount_table_size,
                l2_offset, check_copied);
            if (ret < 0) {
                goto fail;
            }
            errors += ret;
        }
    }
    qemu_free(l1_table);
    return errors;

fail:
    fprintf(stderr, "ERROR: I/O error in check_refcounts_l1\n");
    qemu_free(l1_table);
    return -EIO;
}

/*
 * Checks an image for refcount consistency.
 *
 * Returns 0 if no errors are found, the number of errors in case the image is
 * detected as corrupted, and -errno when an internal error occurred.
 */
int qcow2_check_refcounts(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    int64_t size;
    int nb_clusters, refcount1, refcount2, i;
    QCowSnapshot *sn;
    uint16_t *refcount_table;
    int ret, errors = 0;

    size = bdrv_getlength(s->hd);
    nb_clusters = size_to_clusters(s, size);
    refcount_table = qemu_mallocz(nb_clusters * sizeof(uint16_t));

    /* header */
    errors += inc_refcounts(bs, refcount_table, nb_clusters,
                  0, s->cluster_size);

    /* current L1 table */
    ret = check_refcounts_l1(bs, refcount_table, nb_clusters,
                       s->l1_table_offset, s->l1_size, 1);
    if (ret < 0) {
        return ret;
    }
    errors += ret;

    /* snapshots */
    for(i = 0; i < s->nb_snapshots; i++) {
        sn = s->snapshots + i;
        check_refcounts_l1(bs, refcount_table, nb_clusters,
                           sn->l1_table_offset, sn->l1_size, 0);
    }
    errors += inc_refcounts(bs, refcount_table, nb_clusters,
                  s->snapshots_offset, s->snapshots_size);

    /* refcount data */
    errors += inc_refcounts(bs, refcount_table, nb_clusters,
                  s->refcount_table_offset,
                  s->refcount_table_size * sizeof(uint64_t));
    for(i = 0; i < s->refcount_table_size; i++) {
        int64_t offset;
        offset = s->refcount_table[i];
        if (offset != 0) {
            errors += inc_refcounts(bs, refcount_table, nb_clusters,
                          offset, s->cluster_size);
        }
    }

    /* compare ref counts */
    for(i = 0; i < nb_clusters; i++) {
        refcount1 = get_refcount(bs, i);
        refcount2 = refcount_table[i];
        if (refcount1 != refcount2) {
            fprintf(stderr, "ERROR cluster %d refcount=%d reference=%d\n",
                   i, refcount1, refcount2);
            errors++;
        }
    }

    qemu_free(refcount_table);

    return errors;
}