Statistics
| Branch: | Revision:

root / block / qcow2-refcount.c @ f2b7c8b3

History | View | Annotate | Download (29 kB)

1
/*
2
 * Block driver for the QCOW version 2 format
3
 *
4
 * Copyright (c) 2004-2006 Fabrice Bellard
5
 *
6
 * Permission is hereby granted, free of charge, to any person obtaining a copy
7
 * of this software and associated documentation files (the "Software"), to deal
8
 * in the Software without restriction, including without limitation the rights
9
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10
 * copies of the Software, and to permit persons to whom the Software is
11
 * furnished to do so, subject to the following conditions:
12
 *
13
 * The above copyright notice and this permission notice shall be included in
14
 * all copies or substantial portions of the Software.
15
 *
16
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22
 * THE SOFTWARE.
23
 */
24

    
25
#include "qemu-common.h"
26
#include "block_int.h"
27
#include "block/qcow2.h"
28

    
29
static int64_t alloc_clusters_noref(BlockDriverState *bs, int64_t size);
30
static int update_refcount(BlockDriverState *bs,
31
                            int64_t offset, int64_t length,
32
                            int addend);
33

    
34

    
/* When non-zero, per-update writes of refcount block entries are suppressed
 * and the dirty refcount block cache is flushed later via
 * write_refcount_block() (set by qcow2_update_snapshot_refcount()). */
static int cache_refcount_updates = 0;
36

    
37
/*
 * Flushes the cached refcount block back to its location on disk.
 *
 * Does nothing (and reports success) when no block is cached yet.
 * Returns 0 on success, -EIO when the write fails or is short.
 */
static int write_refcount_block(BDRVQcowState *s)
{
    size_t bytes = s->cluster_size;
    int ret = 0;

    if (s->refcount_block_cache_offset != 0) {
        if (bdrv_pwrite(s->hd, s->refcount_block_cache_offset,
                        s->refcount_block_cache, bytes) != bytes) {
            ret = -EIO;
        }
    }

    return ret;
}
53

    
54
/*********************************************************/
55
/* refcount handling */
56

    
57
/*
 * Allocates the refcount caches and reads the refcount table from disk.
 *
 * Returns 0 on success.  On a failed/short read of the refcount table the
 * caches are released again (and NULLed so qcow2_refcount_close() remains
 * safe) and -EIO is returned.
 */
int qcow2_refcount_init(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    int ret, refcount_table_size2, i;

    s->refcount_block_cache = qemu_malloc(s->cluster_size);
    refcount_table_size2 = s->refcount_table_size * sizeof(uint64_t);
    s->refcount_table = qemu_malloc(refcount_table_size2);
    if (s->refcount_table_size > 0) {
        ret = bdrv_pread(s->hd, s->refcount_table_offset,
                         s->refcount_table, refcount_table_size2);
        if (ret != refcount_table_size2)
            goto fail;
        /* table is stored big-endian on disk */
        for(i = 0; i < s->refcount_table_size; i++)
            be64_to_cpus(&s->refcount_table[i]);
    }
    return 0;
 fail:
    /* Don't leak the caches; NULL them so a later close is a no-op free */
    qemu_free(s->refcount_block_cache);
    s->refcount_block_cache = NULL;
    qemu_free(s->refcount_table);
    s->refcount_table = NULL;
    /* A failed read is an I/O error, not an out-of-memory condition */
    return -EIO;
}
77

    
78
/* Releases the in-memory refcount caches. */
void qcow2_refcount_close(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;

    /* The two frees are independent; order does not matter. */
    qemu_free(s->refcount_table);
    qemu_free(s->refcount_block_cache);
}
84

    
85

    
86
/*
 * Reads the refcount block at the given offset into the single-entry
 * refcount block cache, flushing any pending cached updates first.
 *
 * Returns 0 on success, -EIO on a failed/short read.
 */
static int load_refcount_block(BlockDriverState *bs,
                               int64_t refcount_block_offset)
{
    BDRVQcowState *s = bs->opaque;

    /* Don't lose deferred updates when the cache is repurposed */
    if (cache_refcount_updates) {
        write_refcount_block(s);
    }

    if (bdrv_pread(s->hd, refcount_block_offset, s->refcount_block_cache,
                   s->cluster_size) != s->cluster_size) {
        return -EIO;
    }

    s->refcount_block_cache_offset = refcount_block_offset;
    return 0;
}
103

    
104
/*
 * Returns the current refcount of the given cluster.
 *
 * Clusters beyond the refcount table, or covered by a not-yet-allocated
 * refcount block, report 0 (free).  If reading the refcount block from
 * disk fails, 1 is returned so the cluster is conservatively treated as
 * allocated.
 */
static int get_refcount(BlockDriverState *bs, int64_t cluster_index)
{
    BDRVQcowState *s = bs->opaque;
    int refcount_table_index, block_index;
    int64_t refcount_block_offset;

    /* Each refcount block covers 2^(cluster_bits - REFCOUNT_SHIFT) clusters */
    refcount_table_index = cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT);
    if (refcount_table_index >= s->refcount_table_size)
        return 0;
    refcount_block_offset = s->refcount_table[refcount_table_index];
    if (!refcount_block_offset)
        return 0;
    /* Only one refcount block is cached at a time; reload on miss */
    if (refcount_block_offset != s->refcount_block_cache_offset) {
        /* better than nothing: return allocated if read error */
        if (load_refcount_block(bs, refcount_block_offset) < 0)
            return 1;
    }
    block_index = cluster_index &
        ((1 << (s->cluster_bits - REFCOUNT_SHIFT)) - 1);
    return be16_to_cpu(s->refcount_block_cache[block_index]);
}
125

    
126
/*
 * Grows the refcount table so it holds at least min_size entries.
 *
 * A new, larger table is allocated and written to freshly allocated
 * clusters, the image header is pointed at it, and only then is the
 * in-memory state switched over and the old table freed.  The refcount
 * of the new table's own clusters is updated last to avoid recursing
 * into this function while it runs.
 *
 * Returns 0 on success (including when no growth is needed), a negative
 * errno on failure.
 */
static int grow_refcount_table(BlockDriverState *bs, int min_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_table_size, new_table_size2, refcount_table_clusters, i, ret;
    uint64_t *new_table;
    int64_t table_offset;
    uint8_t data[12];
    int old_table_size;
    int64_t old_table_offset;

    if (min_size <= s->refcount_table_size)
        return 0;
    /* compute new table size */
    refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3);
    for(;;) {
        if (refcount_table_clusters == 0) {
            refcount_table_clusters = 1;
        } else {
            /* grow by ~50% per iteration until min_size fits */
            refcount_table_clusters = (refcount_table_clusters * 3 + 1) / 2;
        }
        new_table_size = refcount_table_clusters << (s->cluster_bits - 3);
        if (min_size <= new_table_size)
            break;
    }
#ifdef DEBUG_ALLOC2
    printf("grow_refcount_table from %d to %d\n",
           s->refcount_table_size,
           new_table_size);
#endif
    new_table_size2 = new_table_size * sizeof(uint64_t);
    new_table = qemu_mallocz(new_table_size2);
    memcpy(new_table, s->refcount_table,
           s->refcount_table_size * sizeof(uint64_t));
    /* convert to big-endian for the on-disk copy ... */
    for(i = 0; i < s->refcount_table_size; i++)
        cpu_to_be64s(&new_table[i]);
    /* Note: we cannot update the refcount now to avoid recursion */
    table_offset = alloc_clusters_noref(bs, new_table_size2);
    ret = bdrv_pwrite(s->hd, table_offset, new_table, new_table_size2);
    if (ret != new_table_size2)
        goto fail;
    /* ... and back to host order for the in-memory copy */
    for(i = 0; i < s->refcount_table_size; i++)
        be64_to_cpus(&new_table[i]);

    /* Update the header: 8-byte table offset + 4-byte cluster count,
     * written in one 12-byte pwrite over the two adjacent header fields */
    cpu_to_be64w((uint64_t*)data, table_offset);
    cpu_to_be32w((uint32_t*)(data + 8), refcount_table_clusters);
    ret = bdrv_pwrite(s->hd, offsetof(QCowHeader, refcount_table_offset),
                    data, sizeof(data));
    if (ret != sizeof(data)) {
        goto fail;
    }

    /* Switch the in-memory state over to the new table */
    qemu_free(s->refcount_table);
    old_table_offset = s->refcount_table_offset;
    old_table_size = s->refcount_table_size;
    s->refcount_table = new_table;
    s->refcount_table_size = new_table_size;
    s->refcount_table_offset = table_offset;

    /* Now it is safe to count the new table and release the old one */
    update_refcount(bs, table_offset, new_table_size2, 1);
    qcow2_free_clusters(bs, old_table_offset, old_table_size * sizeof(uint64_t));
    return 0;
 fail:
    qemu_free(new_table);
    return ret < 0 ? ret : -EIO;
}
191

    
192

    
193
/*
 * Returns the offset of the refcount block that covers cluster_index,
 * growing the refcount table and allocating a fresh refcount block if
 * needed.  On success the block is resident in the refcount block cache.
 *
 * Returns the (non-negative) block offset on success, a negative errno
 * on failure.
 */
static int64_t alloc_refcount_block(BlockDriverState *bs, int64_t cluster_index)
{
    BDRVQcowState *s = bs->opaque;
    int64_t offset, refcount_block_offset;
    unsigned int refcount_table_index;
    int ret;
    uint64_t data64;
    /* remember caller's caching mode; restored after the nested update */
    int cache = cache_refcount_updates;

    /* Find L1 index and grow refcount table if needed */
    refcount_table_index = cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT);
    if (refcount_table_index >= s->refcount_table_size) {
        ret = grow_refcount_table(bs, refcount_table_index + 1);
        if (ret < 0)
            return ret;
    }

    /* Load or allocate the refcount block */
    refcount_block_offset = s->refcount_table[refcount_table_index];
    if (!refcount_block_offset) {
        /* flush and disable write caching so the recursive
         * update_refcount() below hits the disk directly */
        if (cache_refcount_updates) {
            write_refcount_block(s);
            cache_refcount_updates = 0;
        }
        /* create a new refcount block */
        /* Note: we cannot update the refcount now to avoid recursion */
        offset = alloc_clusters_noref(bs, s->cluster_size);
        memset(s->refcount_block_cache, 0, s->cluster_size);
        ret = bdrv_pwrite(s->hd, offset, s->refcount_block_cache, s->cluster_size);
        if (ret != s->cluster_size)
            return -EINVAL;
        /* hook the new block into the refcount table, both in memory
         * and on disk */
        s->refcount_table[refcount_table_index] = offset;
        data64 = cpu_to_be64(offset);
        ret = bdrv_pwrite(s->hd, s->refcount_table_offset +
                          refcount_table_index * sizeof(uint64_t),
                          &data64, sizeof(data64));
        if (ret != sizeof(data64))
            return -EINVAL;

        refcount_block_offset = offset;
        s->refcount_block_cache_offset = offset;
        /* the new block describes its own cluster, so count it */
        update_refcount(bs, offset, s->cluster_size, 1);
        cache_refcount_updates = cache;
    } else {
        if (refcount_block_offset != s->refcount_block_cache_offset) {
            if (load_refcount_block(bs, refcount_block_offset) < 0)
                return -EIO;
        }
    }

    return refcount_block_offset;
}
245

    
246
#define REFCOUNTS_PER_SECTOR (512 >> REFCOUNT_SHIFT)
247
static int write_refcount_block_entries(BDRVQcowState *s,
248
    int64_t refcount_block_offset, int first_index, int last_index)
249
{
250
    size_t size;
251

    
252
    if (cache_refcount_updates) {
253
        return 0;
254
    }
255

    
256
    first_index &= ~(REFCOUNTS_PER_SECTOR - 1);
257
    last_index = (last_index + REFCOUNTS_PER_SECTOR)
258
        & ~(REFCOUNTS_PER_SECTOR - 1);
259

    
260
    size = (last_index - first_index) << REFCOUNT_SHIFT;
261
    if (bdrv_pwrite(s->hd,
262
        refcount_block_offset + (first_index << REFCOUNT_SHIFT),
263
        &s->refcount_block_cache[first_index], size) != size)
264
    {
265
        return -EIO;
266
    }
267

    
268
    return 0;
269
}
270

    
271
/* XXX: cache several refcount block clusters ? */
272
/*
 * Modifies the refcount of every cluster overlapping the byte range
 * [offset, offset + length) by addend.  Dirty refcount block entries are
 * written back once per refcount block: whenever the iteration crosses
 * into a different block, and once more at the end.
 *
 * Returns 0 on success, a negative errno on failure (refcounts already
 * changed in earlier iterations are not rolled back).
 */
static int update_refcount(BlockDriverState *bs,
                            int64_t offset, int64_t length,
                            int addend)
{
    BDRVQcowState *s = bs->opaque;
    int64_t start, last, cluster_offset;
    int64_t refcount_block_offset = 0;
    int64_t table_index = -1, old_table_index;
    /* dirty entry range within the currently loaded refcount block */
    int first_index = -1, last_index = -1;

#ifdef DEBUG_ALLOC2
    printf("update_refcount: offset=%" PRId64 " size=%" PRId64 " addend=%d\n",
           offset, length, addend);
#endif
    if (length <= 0)
        return -EINVAL;
    /* round the range to cluster boundaries */
    start = offset & ~(s->cluster_size - 1);
    last = (offset + length - 1) & ~(s->cluster_size - 1);
    for(cluster_offset = start; cluster_offset <= last;
        cluster_offset += s->cluster_size)
    {
        int block_index, refcount;
        int64_t cluster_index = cluster_offset >> s->cluster_bits;

        /* Only write refcount block to disk when we are done with it */
        old_table_index = table_index;
        table_index = cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT);
        if ((old_table_index >= 0) && (table_index != old_table_index)) {

            if (write_refcount_block_entries(s, refcount_block_offset,
                first_index, last_index) < 0)
            {
                return -EIO;
            }

            first_index = -1;
            last_index = -1;
        }

        /* Load the refcount block and allocate it if needed */
        refcount_block_offset = alloc_refcount_block(bs, cluster_index);
        if (refcount_block_offset < 0) {
            return refcount_block_offset;
        }

        /* we can update the count and save it */
        block_index = cluster_index &
            ((1 << (s->cluster_bits - REFCOUNT_SHIFT)) - 1);
        /* widen the dirty range to include this entry */
        if (first_index == -1 || block_index < first_index) {
            first_index = block_index;
        }
        if (block_index > last_index) {
            last_index = block_index;
        }

        refcount = be16_to_cpu(s->refcount_block_cache[block_index]);
        refcount += addend;
        /* refcounts are 16-bit on disk; reject under-/overflow */
        if (refcount < 0 || refcount > 0xffff)
            return -EINVAL;
        /* remember freed clusters so allocation can reuse them */
        if (refcount == 0 && cluster_index < s->free_cluster_index) {
            s->free_cluster_index = cluster_index;
        }
        s->refcount_block_cache[block_index] = cpu_to_be16(refcount);
    }

    /* Write last changed block to disk */
    if (refcount_block_offset != 0) {
        if (write_refcount_block_entries(s, refcount_block_offset,
            first_index, last_index) < 0)
        {
            return -EIO;
        }
    }

    return 0;
}
348

    
349
/* addend must be 1 or -1 */
350
static int update_cluster_refcount(BlockDriverState *bs,
351
                                   int64_t cluster_index,
352
                                   int addend)
353
{
354
    BDRVQcowState *s = bs->opaque;
355
    int ret;
356

    
357
    ret = update_refcount(bs, cluster_index << s->cluster_bits, 1, addend);
358
    if (ret < 0) {
359
        return ret;
360
    }
361

    
362
    return get_refcount(bs, cluster_index);
363
}
364

    
365

    
366

    
367
/*********************************************************/
368
/* cluster allocation functions */
369

    
370

    
371

    
372
/* return < 0 if error */
373
static int64_t alloc_clusters_noref(BlockDriverState *bs, int64_t size)
374
{
375
    BDRVQcowState *s = bs->opaque;
376
    int i, nb_clusters;
377

    
378
    nb_clusters = size_to_clusters(s, size);
379
retry:
380
    for(i = 0; i < nb_clusters; i++) {
381
        int64_t i = s->free_cluster_index++;
382
        if (get_refcount(bs, i) != 0)
383
            goto retry;
384
    }
385
#ifdef DEBUG_ALLOC2
386
    printf("alloc_clusters: size=%" PRId64 " -> %" PRId64 "\n",
387
            size,
388
            (s->free_cluster_index - nb_clusters) << s->cluster_bits);
389
#endif
390
    return (s->free_cluster_index - nb_clusters) << s->cluster_bits;
391
}
392

    
393
/*
 * Allocates enough clusters to hold size bytes, sets their refcount to 1
 * and returns the byte offset of the first cluster.
 *
 * Returns a negative errno on failure.  (The original silently ignored
 * failures of both alloc_clusters_noref() and update_refcount(), handing
 * callers an offset whose refcounts were never written.)
 */
int64_t qcow2_alloc_clusters(BlockDriverState *bs, int64_t size)
{
    int64_t offset;
    int ret;

    offset = alloc_clusters_noref(bs, size);
    if (offset < 0) {
        return offset;
    }

    ret = update_refcount(bs, offset, size, 1);
    if (ret < 0) {
        return ret;
    }

    return offset;
}
401

    
402
/* only used to allocate compressed sectors. We try to allocate
   contiguous sectors. size must be <= cluster_size */
/*
 * Sub-cluster byte allocator: hands out size bytes from the partially
 * used cluster tracked in s->free_byte_offset, allocating a new cluster
 * when the current one cannot hold the request.  Each additional
 * allocation within a cluster bumps that cluster's refcount.
 */
int64_t qcow2_alloc_bytes(BlockDriverState *bs, int size)
{
    BDRVQcowState *s = bs->opaque;
    int64_t offset, cluster_offset;
    int free_in_cluster;

    assert(size > 0 && size <= s->cluster_size);
    if (s->free_byte_offset == 0) {
        /* no partially used cluster yet: start one */
        s->free_byte_offset = qcow2_alloc_clusters(bs, s->cluster_size);
    }
 redo:
    free_in_cluster = s->cluster_size -
        (s->free_byte_offset & (s->cluster_size - 1));
    if (size <= free_in_cluster) {
        /* enough space in current cluster */
        offset = s->free_byte_offset;
        s->free_byte_offset += size;
        free_in_cluster -= size;
        if (free_in_cluster == 0)
            s->free_byte_offset = 0;
        /* the first chunk in a cluster already carries the refcount set
         * by qcow2_alloc_clusters(); later chunks add one reference */
        if ((offset & (s->cluster_size - 1)) != 0)
            update_cluster_refcount(bs, offset >> s->cluster_bits, 1);
    } else {
        offset = qcow2_alloc_clusters(bs, s->cluster_size);
        cluster_offset = s->free_byte_offset & ~(s->cluster_size - 1);
        if ((cluster_offset + s->cluster_size) == offset) {
            /* we are lucky: contiguous data */
            offset = s->free_byte_offset;
            update_cluster_refcount(bs, offset >> s->cluster_bits, 1);
            s->free_byte_offset += size;
        } else {
            /* not contiguous: abandon the tail of the old cluster and
             * retry inside the newly allocated one */
            s->free_byte_offset = offset;
            goto redo;
        }
    }
    return offset;
}
441

    
442
/*
 * Drops one reference from every cluster overlapping [offset, offset+size).
 * NOTE(review): the update_refcount() return value is silently discarded,
 * so I/O errors while freeing go unreported — consider logging them.
 */
void qcow2_free_clusters(BlockDriverState *bs,
                          int64_t offset, int64_t size)
{
    update_refcount(bs, offset, size, -1);
}
447

    
448
/*
449
 * free_any_clusters
450
 *
451
 * free clusters according to its type: compressed or not
452
 *
453
 */
454

    
455
/*
 * Frees a cluster according to the type encoded in its L2 entry:
 * compressed clusters release the sector run stored in the descriptor,
 * normal clusters release nb_clusters whole clusters.
 */
void qcow2_free_any_clusters(BlockDriverState *bs,
    uint64_t cluster_offset, int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;

    if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
        /* compressed: the descriptor encodes the size in 512-byte sectors */
        int nb_csectors = ((cluster_offset >> s->csize_shift) &
                           s->csize_mask) + 1;

        qcow2_free_clusters(bs,
            (cluster_offset & s->cluster_offset_mask) & ~511,
            nb_csectors * 512);
    } else {
        /* plain clusters */
        qcow2_free_clusters(bs, cluster_offset,
                            nb_clusters << s->cluster_bits);
    }
}
476

    
477

    
478

    
479
/*********************************************************/
480
/* snapshots and image creation */
481

    
482

    
483

    
484
/*
 * Increments (in the image-creation refcount block, big-endian in memory)
 * the refcount of every cluster overlapping [offset, offset + size).
 */
void qcow2_create_refcount_update(QCowCreateState *s, int64_t offset,
    int64_t size)
{
    int64_t cluster_offset;
    int64_t first = offset & ~(s->cluster_size - 1);
    int64_t last = (offset + size - 1)  & ~(s->cluster_size - 1);

    for (cluster_offset = first; cluster_offset <= last;
         cluster_offset += s->cluster_size) {
        uint16_t *entry = &s->refcount_block[cluster_offset >> s->cluster_bits];

        /* entries are kept big-endian, ready to be written out */
        *entry = cpu_to_be16(be16_to_cpu(*entry) + 1);
    }
}
501

    
502
/* update the refcounts of snapshots and the copied flag */
503
/*
 * Walks a snapshot's (or the active) L1/L2 tables and applies addend to
 * the refcount of every referenced cluster (addend may be 0 to only
 * refresh the QCOW_OFLAG_COPIED flags).  Refcount block writes are
 * batched via cache_refcount_updates and flushed once at the end.
 *
 * Returns 0 on success, -EIO on any read/write failure.
 */
int qcow2_update_snapshot_refcount(BlockDriverState *bs,
    int64_t l1_table_offset, int l1_size, int addend)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table, *l2_table, l2_offset, offset, l1_size2, l1_allocated;
    int64_t old_offset, old_l2_offset;
    int l2_size, i, j, l1_modified, l2_modified, nb_csectors, refcount;

    qcow2_l2_cache_reset(bs);
    /* batch refcount block writes until the final flush below */
    cache_refcount_updates = 1;

    l2_table = NULL;
    l1_table = NULL;
    l1_size2 = l1_size * sizeof(uint64_t);
    /* a foreign (snapshot) L1 table must be read from disk; the active
     * one is already in memory */
    if (l1_table_offset != s->l1_table_offset) {
        if (l1_size2 != 0) {
            l1_table = qemu_mallocz(align_offset(l1_size2, 512));
        } else {
            l1_table = NULL;
        }
        l1_allocated = 1;
        if (bdrv_pread(s->hd, l1_table_offset,
                       l1_table, l1_size2) != l1_size2)
            goto fail;
        for(i = 0;i < l1_size; i++)
            be64_to_cpus(&l1_table[i]);
    } else {
        assert(l1_size == s->l1_size);
        l1_table = s->l1_table;
        l1_allocated = 0;
    }

    l2_size = s->l2_size * sizeof(uint64_t);
    l2_table = qemu_malloc(l2_size);
    l1_modified = 0;
    for(i = 0; i < l1_size; i++) {
        l2_offset = l1_table[i];
        if (l2_offset) {
            old_l2_offset = l2_offset;
            l2_offset &= ~QCOW_OFLAG_COPIED;
            l2_modified = 0;
            if (bdrv_pread(s->hd, l2_offset, l2_table, l2_size) != l2_size)
                goto fail;
            for(j = 0; j < s->l2_size; j++) {
                offset = be64_to_cpu(l2_table[j]);
                if (offset != 0) {
                    old_offset = offset;
                    offset &= ~QCOW_OFLAG_COPIED;
                    if (offset & QCOW_OFLAG_COMPRESSED) {
                        /* compressed cluster: size comes from the descriptor */
                        nb_csectors = ((offset >> s->csize_shift) &
                                       s->csize_mask) + 1;
                        if (addend != 0)
                            update_refcount(bs, (offset & s->cluster_offset_mask) & ~511,
                                            nb_csectors * 512, addend);
                        /* compressed clusters are never modified */
                        refcount = 2;
                    } else {
                        if (addend != 0) {
                            refcount = update_cluster_refcount(bs, offset >> s->cluster_bits, addend);
                        } else {
                            refcount = get_refcount(bs, offset >> s->cluster_bits);
                        }
                    }

                    /* OFLAG_COPIED must reflect "exactly one reference" */
                    if (refcount == 1) {
                        offset |= QCOW_OFLAG_COPIED;
                    }
                    if (offset != old_offset) {
                        l2_table[j] = cpu_to_be64(offset);
                        l2_modified = 1;
                    }
                }
            }
            if (l2_modified) {
                if (bdrv_pwrite(s->hd,
                                l2_offset, l2_table, l2_size) != l2_size)
                    goto fail;
            }

            /* same treatment for the L2 table's own cluster */
            if (addend != 0) {
                refcount = update_cluster_refcount(bs, l2_offset >> s->cluster_bits, addend);
            } else {
                refcount = get_refcount(bs, l2_offset >> s->cluster_bits);
            }
            if (refcount == 1) {
                l2_offset |= QCOW_OFLAG_COPIED;
            }
            if (l2_offset != old_l2_offset) {
                l1_table[i] = l2_offset;
                l1_modified = 1;
            }
        }
    }
    if (l1_modified) {
        /* write the L1 table big-endian, then convert back for reuse */
        for(i = 0; i < l1_size; i++)
            cpu_to_be64s(&l1_table[i]);
        if (bdrv_pwrite(s->hd, l1_table_offset, l1_table,
                        l1_size2) != l1_size2)
            goto fail;
        for(i = 0; i < l1_size; i++)
            be64_to_cpus(&l1_table[i]);
    }
    if (l1_allocated)
        qemu_free(l1_table);
    qemu_free(l2_table);
    /* re-enable direct writes and flush the batched refcount updates */
    cache_refcount_updates = 0;
    write_refcount_block(s);
    return 0;
 fail:
    if (l1_allocated)
        qemu_free(l1_table);
    qemu_free(l2_table);
    cache_refcount_updates = 0;
    write_refcount_block(s);
    return -EIO;
}
619

    
620

    
621

    
622

    
623
/*********************************************************/
624
/* refcount checking functions */
625

    
626

    
627

    
628
/*
629
 * Increases the refcount for a range of clusters in a given refcount table.
630
 * This is used to construct a temporary refcount table out of L1 and L2 tables
631
 * which can be compared the the refcount table saved in the image.
632
 *
633
 * Returns the number of errors in the image that were found
634
 */
635
static int inc_refcounts(BlockDriverState *bs,
                          uint16_t *refcount_table,
                          int refcount_table_size,
                          int64_t offset, int64_t size)
{
    BDRVQcowState *s = bs->opaque;
    int64_t cluster_offset, first, last;
    int errors = 0;

    if (size <= 0)
        return 0;

    /* round the byte range out to whole clusters */
    first = offset & ~(s->cluster_size - 1);
    last = (offset + size - 1) & ~(s->cluster_size - 1);
    for (cluster_offset = first; cluster_offset <= last;
         cluster_offset += s->cluster_size) {
        int k = cluster_offset >> s->cluster_bits;

        if (k < 0 || k >= refcount_table_size) {
            fprintf(stderr, "ERROR: invalid cluster offset=0x%" PRIx64 "\n",
                cluster_offset);
            errors++;
            continue;
        }
        /* 16-bit counter wrapping to 0 means it overflowed */
        if (++refcount_table[k] == 0) {
            fprintf(stderr, "ERROR: overflow cluster offset=0x%" PRIx64
                "\n", cluster_offset);
            errors++;
        }
    }

    return errors;
}
668

    
669
/*
670
 * Increases the refcount in the given refcount table for the all clusters
671
 * referenced in the L2 table. While doing so, performs some checks on L2
672
 * entries.
673
 *
674
 * Returns the number of errors found by the checks or -errno if an internal
675
 * error occurred.
676
 */
677
/*
 * Checks one L2 table: marks every referenced cluster in refcount_table,
 * validates the COPIED/COMPRESSED flag combinations and cluster alignment.
 *
 * Returns the number of errors found, or -EIO if the table itself could
 * not be read.
 */
static int check_refcounts_l2(BlockDriverState *bs,
    uint16_t *refcount_table, int refcount_table_size, int64_t l2_offset,
    int check_copied)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table, offset;
    int i, l2_size, nb_csectors, refcount;
    int errors = 0;

    /* Read L2 table from disk */
    l2_size = s->l2_size * sizeof(uint64_t);
    l2_table = qemu_malloc(l2_size);

    if (bdrv_pread(s->hd, l2_offset, l2_table, l2_size) != l2_size)
        goto fail;

    /* Do the actual checks */
    for(i = 0; i < s->l2_size; i++) {
        offset = be64_to_cpu(l2_table[i]);
        if (offset != 0) {
            if (offset & QCOW_OFLAG_COMPRESSED) {
                /* Compressed clusters don't have QCOW_OFLAG_COPIED */
                if (offset & QCOW_OFLAG_COPIED) {
                    fprintf(stderr, "ERROR: cluster %" PRId64 ": "
                        "copied flag must never be set for compressed "
                        "clusters\n", offset >> s->cluster_bits);
                    offset &= ~QCOW_OFLAG_COPIED;
                    errors++;
                }

                /* Mark cluster as used */
                nb_csectors = ((offset >> s->csize_shift) &
                               s->csize_mask) + 1;
                offset &= s->cluster_offset_mask;
                errors += inc_refcounts(bs, refcount_table,
                              refcount_table_size,
                              offset & ~511, nb_csectors * 512);
            } else {
                /* QCOW_OFLAG_COPIED must be set iff refcount == 1 */
                if (check_copied) {
                    uint64_t entry = offset;
                    offset &= ~QCOW_OFLAG_COPIED;
                    refcount = get_refcount(bs, offset >> s->cluster_bits);
                    if ((refcount == 1) != ((entry & QCOW_OFLAG_COPIED) != 0)) {
                        fprintf(stderr, "ERROR OFLAG_COPIED: offset=%"
                            PRIx64 " refcount=%d\n", entry, refcount);
                        errors++;
                    }
                }

                /* Mark cluster as used */
                offset &= ~QCOW_OFLAG_COPIED;
                errors += inc_refcounts(bs, refcount_table,
                              refcount_table_size,
                              offset, s->cluster_size);

                /* Correct offsets are cluster aligned */
                if (offset & (s->cluster_size - 1)) {
                    fprintf(stderr, "ERROR offset=%" PRIx64 ": Cluster is not "
                        "properly aligned; L2 entry corrupted.\n", offset);
                    errors++;
                }
            }
        }
    }

    qemu_free(l2_table);
    return errors;

fail:
    /* bug fix: the message previously claimed "check_refcounts_l1",
     * a copy-paste error that misattributed the failing function */
    fprintf(stderr, "ERROR: I/O error in check_refcounts_l2\n");
    qemu_free(l2_table);
    return -EIO;
}
751

    
752
/*
753
 * Increases the refcount for the L1 table, its L2 tables and all referenced
754
 * clusters in the given refcount table. While doing so, performs some checks
755
 * on L1 and L2 entries.
756
 *
757
 * Returns the number of errors found by the checks or -errno if an internal
758
 * error occurred.
759
 */
760
/*
 * Checks an L1 table: marks the table itself, each L2 table and (via
 * check_refcounts_l2) every data cluster in refcount_table, validating
 * COPIED flags and alignment along the way.
 *
 * Returns the number of errors found, or -EIO on a read failure.
 */
static int check_refcounts_l1(BlockDriverState *bs,
                              uint16_t *refcount_table,
                              int refcount_table_size,
                              int64_t l1_table_offset, int l1_size,
                              int check_copied)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table, l2_offset, l1_size2;
    int i, refcount, ret;
    int errors = 0;

    l1_size2 = l1_size * sizeof(uint64_t);

    /* Mark L1 table as used */
    errors += inc_refcounts(bs, refcount_table, refcount_table_size,
                  l1_table_offset, l1_size2);

    /* Read L1 table entries from disk */
    if (l1_size2 == 0) {
        /* empty L1 table: nothing to read, loop below does zero passes */
        l1_table = NULL;
    } else {
        l1_table = qemu_malloc(l1_size2);
        if (bdrv_pread(s->hd, l1_table_offset,
                       l1_table, l1_size2) != l1_size2)
            goto fail;
        for(i = 0;i < l1_size; i++)
            be64_to_cpus(&l1_table[i]);
    }

    /* Do the actual checks */
    for(i = 0; i < l1_size; i++) {
        l2_offset = l1_table[i];
        if (l2_offset) {
            /* QCOW_OFLAG_COPIED must be set iff refcount == 1 */
            if (check_copied) {
                refcount = get_refcount(bs, (l2_offset & ~QCOW_OFLAG_COPIED)
                    >> s->cluster_bits);
                if ((refcount == 1) != ((l2_offset & QCOW_OFLAG_COPIED) != 0)) {
                    fprintf(stderr, "ERROR OFLAG_COPIED: l2_offset=%" PRIx64
                        " refcount=%d\n", l2_offset, refcount);
                    errors++;
                }
            }

            /* Mark L2 table as used */
            l2_offset &= ~QCOW_OFLAG_COPIED;
            errors += inc_refcounts(bs, refcount_table,
                          refcount_table_size,
                          l2_offset,
                          s->cluster_size);

            /* L2 tables are cluster aligned */
            if (l2_offset & (s->cluster_size - 1)) {
                fprintf(stderr, "ERROR l2_offset=%" PRIx64 ": Table is not "
                    "cluster aligned; L1 entry corrupted\n", l2_offset);
                errors++;
            }

            /* Process and check L2 entries */
            ret = check_refcounts_l2(bs, refcount_table, refcount_table_size,
                l2_offset, check_copied);
            if (ret < 0) {
                goto fail;
            }
            errors += ret;
        }
    }
    qemu_free(l1_table);
    return errors;

fail:
    fprintf(stderr, "ERROR: I/O error in check_refcounts_l1\n");
    qemu_free(l1_table);
    return -EIO;
}
835

    
836
/*
837
 * Checks an image for refcount consistency.
838
 *
839
 * Returns 0 if no errors are found, the number of errors in case the image is
840
 * detected as corrupted, and -errno when an internal error occured.
841
 */
842
/*
 * Checks an image for refcount consistency: rebuilds a reference refcount
 * table from the header, L1/L2 tables (active and snapshots), snapshot
 * area and refcount structures, then compares it against the refcounts
 * stored in the image.
 *
 * Returns 0 if no errors are found, the number of errors in case the
 * image is detected as corrupted, and -errno on an internal error.
 *
 * Fixes over the original: the temporary refcount table is no longer
 * leaked when an L1 check fails, and errors found while walking snapshot
 * L1 tables are now counted/propagated instead of being dropped.
 */
int qcow2_check_refcounts(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    int64_t size;
    int nb_clusters, refcount1, refcount2, i;
    QCowSnapshot *sn;
    uint16_t *refcount_table;
    int ret, errors = 0;

    size = bdrv_getlength(s->hd);
    nb_clusters = size_to_clusters(s, size);
    refcount_table = qemu_mallocz(nb_clusters * sizeof(uint16_t));

    /* header */
    errors += inc_refcounts(bs, refcount_table, nb_clusters,
                  0, s->cluster_size);

    /* current L1 table */
    ret = check_refcounts_l1(bs, refcount_table, nb_clusters,
                       s->l1_table_offset, s->l1_size, 1);
    if (ret < 0) {
        goto fail;
    }
    errors += ret;

    /* snapshots */
    for(i = 0; i < s->nb_snapshots; i++) {
        sn = s->snapshots + i;
        ret = check_refcounts_l1(bs, refcount_table, nb_clusters,
                           sn->l1_table_offset, sn->l1_size, 0);
        if (ret < 0) {
            goto fail;
        }
        errors += ret;
    }
    errors += inc_refcounts(bs, refcount_table, nb_clusters,
                  s->snapshots_offset, s->snapshots_size);

    /* refcount data */
    errors += inc_refcounts(bs, refcount_table, nb_clusters,
                  s->refcount_table_offset,
                  s->refcount_table_size * sizeof(uint64_t));
    for(i = 0; i < s->refcount_table_size; i++) {
        int64_t offset;
        offset = s->refcount_table[i];
        if (offset != 0) {
            errors += inc_refcounts(bs, refcount_table, nb_clusters,
                          offset, s->cluster_size);
        }
    }

    /* compare ref counts */
    for(i = 0; i < nb_clusters; i++) {
        refcount1 = get_refcount(bs, i);
        refcount2 = refcount_table[i];
        if (refcount1 != refcount2) {
            fprintf(stderr, "ERROR cluster %d refcount=%d reference=%d\n",
                   i, refcount1, refcount2);
            errors++;
        }
    }

    qemu_free(refcount_table);

    return errors;

fail:
    /* don't leak the reference table on internal errors */
    qemu_free(refcount_table);
    return ret;
}
904