Statistics
| Branch: | Revision:

root / block / qcow2-refcount.c @ 3f6a3ee5

History | View | Annotate | Download (28.8 kB)

1
/*
2
 * Block driver for the QCOW version 2 format
3
 *
4
 * Copyright (c) 2004-2006 Fabrice Bellard
5
 *
6
 * Permission is hereby granted, free of charge, to any person obtaining a copy
7
 * of this software and associated documentation files (the "Software"), to deal
8
 * in the Software without restriction, including without limitation the rights
9
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10
 * copies of the Software, and to permit persons to whom the Software is
11
 * furnished to do so, subject to the following conditions:
12
 *
13
 * The above copyright notice and this permission notice shall be included in
14
 * all copies or substantial portions of the Software.
15
 *
16
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22
 * THE SOFTWARE.
23
 */
24

    
25
#include "qemu-common.h"
26
#include "block_int.h"
27
#include "block/qcow2.h"
28

    
29
static int64_t alloc_clusters_noref(BlockDriverState *bs, int64_t size);
30
static int update_refcount(BlockDriverState *bs,
31
                            int64_t offset, int64_t length,
32
                            int addend);
33

    
34

    
35
static int cache_refcount_updates = 0;
36

    
37
/*
 * Flush the cached refcount block back to the image file.
 *
 * Writes the in-memory refcount block cache to the offset it was loaded
 * from.  A no-op when no block is currently cached (offset == 0).
 *
 * Returns 0 on success, -EIO on a short or failed write.
 */
static int write_refcount_block(BDRVQcowState *s)
{
    size_t nbytes = s->cluster_size;
    int ret;

    /* Nothing cached yet, so nothing to flush */
    if (s->refcount_block_cache_offset == 0) {
        return 0;
    }

    ret = bdrv_pwrite(s->hd, s->refcount_block_cache_offset,
                      s->refcount_block_cache, nbytes);
    if (ret != nbytes) {
        return -EIO;
    }

    return 0;
}
53

    
54
/*********************************************************/
55
/* refcount handling */
56

    
57
/*
 * Allocate the refcount block cache and read the refcount table from the
 * image file, converting its entries to CPU byte order.
 *
 * Returns 0 on success, -EIO if reading the refcount table fails.
 */
int qcow2_refcount_init(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    int ret, refcount_table_size2, i;

    s->refcount_block_cache = qemu_malloc(s->cluster_size);
    refcount_table_size2 = s->refcount_table_size * sizeof(uint64_t);
    s->refcount_table = qemu_malloc(refcount_table_size2);
    if (s->refcount_table_size > 0) {
        ret = bdrv_pread(s->hd, s->refcount_table_offset,
                         s->refcount_table, refcount_table_size2);
        if (ret != refcount_table_size2)
            goto fail;
        /* the table is stored big-endian on disk */
        for(i = 0; i < s->refcount_table_size; i++)
            be64_to_cpus(&s->refcount_table[i]);
    }
    return 0;
 fail:
    /* Fix: the only path to this label is a failed/short read, so report
     * -EIO rather than the previous, misleading -ENOMEM. */
    return -EIO;
}
77

    
78
/* Release the buffers allocated by qcow2_refcount_init(). */
void qcow2_refcount_close(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;

    qemu_free(s->refcount_table);
    qemu_free(s->refcount_block_cache);
}
84

    
85

    
86
/*
 * Read the refcount block at refcount_block_offset into the (single-entry)
 * refcount block cache.
 *
 * If deferred updates are enabled (cache_refcount_updates), the currently
 * cached block is flushed to disk first so its modifications are not lost
 * when the cache is overwritten.
 *
 * Returns 0 on success, -EIO on a short or failed read.
 */
static int load_refcount_block(BlockDriverState *bs,
                               int64_t refcount_block_offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    /* Flush the old cached block before replacing it */
    if (cache_refcount_updates) {
        write_refcount_block(s);
    }

    ret = bdrv_pread(s->hd, refcount_block_offset, s->refcount_block_cache,
                     s->cluster_size);
    if (ret != s->cluster_size)
        return -EIO;
    s->refcount_block_cache_offset = refcount_block_offset;
    return 0;
}
103

    
104
/*
 * Return the refcount of the cluster with the given index.
 *
 * Returns 0 when the cluster lies beyond the refcount table or its
 * refcount block was never allocated (i.e. the cluster is free).  If the
 * refcount block cannot be read, 1 is returned so the cluster is treated
 * as allocated — the safe assumption.
 */
static int get_refcount(BlockDriverState *bs, int64_t cluster_index)
{
    BDRVQcowState *s = bs->opaque;
    int refcount_table_index, block_index;
    int64_t refcount_block_offset;

    /* Index of the refcount block covering this cluster */
    refcount_table_index = cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT);
    if (refcount_table_index >= s->refcount_table_size)
        return 0;
    refcount_block_offset = s->refcount_table[refcount_table_index];
    if (!refcount_block_offset)
        return 0;
    if (refcount_block_offset != s->refcount_block_cache_offset) {
        /* better than nothing: return allocated if read error */
        if (load_refcount_block(bs, refcount_block_offset) < 0)
            return 1;
    }
    /* Entry index within the refcount block */
    block_index = cluster_index &
        ((1 << (s->cluster_bits - REFCOUNT_SHIFT)) - 1);
    return be16_to_cpu(s->refcount_block_cache[block_index]);
}
125

    
126
/*
 * Grow the refcount table so it can hold at least min_size entries.
 *
 * A new, larger table is written to freshly allocated clusters, the image
 * header is updated to point at it, the in-memory state is switched over,
 * and only then are refcounts adjusted (new table +1, old table freed) —
 * refcounts cannot be touched earlier because update_refcount() itself may
 * need the refcount table (recursion).
 *
 * Returns 0 on success (or when no growth is needed), -EIO on failure.
 */
static int grow_refcount_table(BlockDriverState *bs, int min_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_table_size, new_table_size2, refcount_table_clusters, i, ret;
    uint64_t *new_table;
    int64_t table_offset;
    uint8_t data[12];
    int old_table_size;
    int64_t old_table_offset;

    if (min_size <= s->refcount_table_size)
        return 0;
    /* compute new table size */
    refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3);
    for(;;) {
        if (refcount_table_clusters == 0) {
            refcount_table_clusters = 1;
        } else {
            /* grow by roughly 1.5x per iteration until min_size fits */
            refcount_table_clusters = (refcount_table_clusters * 3 + 1) / 2;
        }
        new_table_size = refcount_table_clusters << (s->cluster_bits - 3);
        if (min_size <= new_table_size)
            break;
    }
#ifdef DEBUG_ALLOC2
    printf("grow_refcount_table from %d to %d\n",
           s->refcount_table_size,
           new_table_size);
#endif
    new_table_size2 = new_table_size * sizeof(uint64_t);
    new_table = qemu_mallocz(new_table_size2);
    memcpy(new_table, s->refcount_table,
           s->refcount_table_size * sizeof(uint64_t));
    /* the on-disk table is big-endian */
    for(i = 0; i < s->refcount_table_size; i++)
        cpu_to_be64s(&new_table[i]);
    /* Note: we cannot update the refcount now to avoid recursion */
    table_offset = alloc_clusters_noref(bs, new_table_size2);
    ret = bdrv_pwrite(s->hd, table_offset, new_table, new_table_size2);
    if (ret != new_table_size2)
        goto fail;
    /* convert the in-memory copy back to CPU byte order */
    for(i = 0; i < s->refcount_table_size; i++)
        be64_to_cpus(&new_table[i]);

    /* Update the header: 12 bytes = new 64-bit table offset followed by the
     * 32-bit cluster count.  NOTE(review): assumes refcount_table_clusters
     * immediately follows refcount_table_offset in QCowHeader — confirm
     * against the header definition. */
    cpu_to_be64w((uint64_t*)data, table_offset);
    cpu_to_be32w((uint32_t*)(data + 8), refcount_table_clusters);
    if (bdrv_pwrite(s->hd, offsetof(QCowHeader, refcount_table_offset),
                    data, sizeof(data)) != sizeof(data))
        goto fail;
    qemu_free(s->refcount_table);
    old_table_offset = s->refcount_table_offset;
    old_table_size = s->refcount_table_size;
    s->refcount_table = new_table;
    s->refcount_table_size = new_table_size;
    s->refcount_table_offset = table_offset;

    /* Now that the new table is live, account for its clusters and release
     * the old table's clusters */
    update_refcount(bs, table_offset, new_table_size2, 1);
    qcow2_free_clusters(bs, old_table_offset, old_table_size * sizeof(uint64_t));
    return 0;
 fail:
    /* NOTE(review): the new table's clusters never had their refcounts
     * incremented (alloc_clusters_noref), so this free is best-effort. */
    qcow2_free_clusters(bs, table_offset, new_table_size2);
    qemu_free(new_table);
    return -EIO;
}
189

    
190

    
191
/*
 * Ensure a refcount block covering cluster_index exists and is loaded into
 * the refcount block cache, growing the refcount table and allocating a
 * new (zeroed) refcount block on disk if necessary.
 *
 * Returns the refcount block's offset (> 0) on success, or a negative
 * error code (-EINVAL on write failure, -EIO on read failure, or whatever
 * grow_refcount_table() returned).
 */
static int64_t alloc_refcount_block(BlockDriverState *bs, int64_t cluster_index)
{
    BDRVQcowState *s = bs->opaque;
    int64_t offset, refcount_block_offset;
    int ret, refcount_table_index;
    uint64_t data64;
    /* remember the caching mode so it can be restored after allocation */
    int cache = cache_refcount_updates;

    /* Find L1 index and grow refcount table if needed */
    refcount_table_index = cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT);
    if (refcount_table_index >= s->refcount_table_size) {
        ret = grow_refcount_table(bs, refcount_table_index + 1);
        if (ret < 0)
            return ret;
    }

    /* Load or allocate the refcount block */
    refcount_block_offset = s->refcount_table[refcount_table_index];
    if (!refcount_block_offset) {
        /* Flush pending deferred updates and temporarily disable caching
         * while the new block is initialised on disk */
        if (cache_refcount_updates) {
            write_refcount_block(s);
            cache_refcount_updates = 0;
        }
        /* create a new refcount block */
        /* Note: we cannot update the refcount now to avoid recursion */
        offset = alloc_clusters_noref(bs, s->cluster_size);
        memset(s->refcount_block_cache, 0, s->cluster_size);
        ret = bdrv_pwrite(s->hd, offset, s->refcount_block_cache, s->cluster_size);
        if (ret != s->cluster_size)
            return -EINVAL;
        s->refcount_table[refcount_table_index] = offset;
        /* persist the new refcount table entry (big-endian) */
        data64 = cpu_to_be64(offset);
        ret = bdrv_pwrite(s->hd, s->refcount_table_offset +
                          refcount_table_index * sizeof(uint64_t),
                          &data64, sizeof(data64));
        if (ret != sizeof(data64))
            return -EINVAL;

        refcount_block_offset = offset;
        s->refcount_block_cache_offset = offset;
        /* the block exists on disk now, so its own cluster can be counted */
        update_refcount(bs, offset, s->cluster_size, 1);
        cache_refcount_updates = cache;
    } else {
        if (refcount_block_offset != s->refcount_block_cache_offset) {
            if (load_refcount_block(bs, refcount_block_offset) < 0)
                return -EIO;
        }
    }

    return refcount_block_offset;
}
242

    
243
#define REFCOUNTS_PER_SECTOR (512 >> REFCOUNT_SHIFT)

/*
 * Write the refcount block entries in [first_index, last_index] from the
 * cache back to disk.  The range is widened to whole 512-byte sectors
 * before writing.
 *
 * A no-op while deferred caching is active (the whole block is flushed
 * later by write_refcount_block()).
 *
 * Returns 0 on success, -EIO on a short or failed write.
 */
static int write_refcount_block_entries(BDRVQcowState *s,
    int64_t refcount_block_offset, int first_index, int last_index)
{
    size_t size;

    /* deferred mode: write_refcount_block() will flush everything later */
    if (cache_refcount_updates) {
        return 0;
    }

    /* align down to the start of the sector containing first_index ... */
    first_index &= ~(REFCOUNTS_PER_SECTOR - 1);
    /* ... and round up past the sector containing last_index (inclusive) */
    last_index = (last_index + REFCOUNTS_PER_SECTOR)
        & ~(REFCOUNTS_PER_SECTOR - 1);

    size = (last_index - first_index) << REFCOUNT_SHIFT;
    if (bdrv_pwrite(s->hd,
        refcount_block_offset + (first_index << REFCOUNT_SHIFT),
        &s->refcount_block_cache[first_index], size) != size)
    {
        return -EIO;
    }

    return 0;
}
267

    
268
/* XXX: cache several refcount block clusters ? */
269
/* XXX: cache several refcount block clusters ? */
/*
 * Add addend to the refcount of every cluster overlapping the byte range
 * [offset, offset + length).
 *
 * Modified entries are accumulated in the cached refcount block and only
 * written back (via write_refcount_block_entries) when the iteration moves
 * on to a different refcount block, plus once at the end.
 *
 * Also lowers s->free_cluster_index when a cluster's refcount drops to 0.
 *
 * Returns 0 on success; -EINVAL for a non-positive length or a refcount
 * that would leave [0, 0xffff]; -EIO on write failure; or the error from
 * alloc_refcount_block().
 */
static int update_refcount(BlockDriverState *bs,
                            int64_t offset, int64_t length,
                            int addend)
{
    BDRVQcowState *s = bs->opaque;
    int64_t start, last, cluster_offset;
    int64_t refcount_block_offset = 0;
    int64_t table_index = -1, old_table_index;
    /* dirty range within the current refcount block; -1 = nothing dirty */
    int first_index = -1, last_index = -1;

#ifdef DEBUG_ALLOC2
    printf("update_refcount: offset=%lld size=%lld addend=%d\n",
           offset, length, addend);
#endif
    if (length <= 0)
        return -EINVAL;
    /* cluster-align the range */
    start = offset & ~(s->cluster_size - 1);
    last = (offset + length - 1) & ~(s->cluster_size - 1);
    for(cluster_offset = start; cluster_offset <= last;
        cluster_offset += s->cluster_size)
    {
        int block_index, refcount;
        int64_t cluster_index = cluster_offset >> s->cluster_bits;

        /* Only write refcount block to disk when we are done with it */
        old_table_index = table_index;
        table_index = cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT);
        if ((old_table_index >= 0) && (table_index != old_table_index)) {

            if (write_refcount_block_entries(s, refcount_block_offset,
                first_index, last_index) < 0)
            {
                return -EIO;
            }

            first_index = -1;
            last_index = -1;
        }

        /* Load the refcount block and allocate it if needed */
        refcount_block_offset = alloc_refcount_block(bs, cluster_index);
        if (refcount_block_offset < 0) {
            return refcount_block_offset;
        }

        /* we can update the count and save it */
        block_index = cluster_index &
            ((1 << (s->cluster_bits - REFCOUNT_SHIFT)) - 1);
        if (first_index == -1 || block_index < first_index) {
            first_index = block_index;
        }
        if (block_index > last_index) {
            last_index = block_index;
        }

        refcount = be16_to_cpu(s->refcount_block_cache[block_index]);
        refcount += addend;
        if (refcount < 0 || refcount > 0xffff)
            return -EINVAL;
        /* a freed cluster becomes a candidate for the next allocation */
        if (refcount == 0 && cluster_index < s->free_cluster_index) {
            s->free_cluster_index = cluster_index;
        }
        s->refcount_block_cache[block_index] = cpu_to_be16(refcount);
    }

    /* Write last changed block to disk */
    if (refcount_block_offset != 0) {
        if (write_refcount_block_entries(s, refcount_block_offset,
            first_index, last_index) < 0)
        {
            return -EIO;
        }
    }

    return 0;
}
345

    
346
/* addend must be 1 or -1 */
347
static int update_cluster_refcount(BlockDriverState *bs,
348
                                   int64_t cluster_index,
349
                                   int addend)
350
{
351
    BDRVQcowState *s = bs->opaque;
352
    int ret;
353

    
354
    ret = update_refcount(bs, cluster_index << s->cluster_bits, 1, addend);
355
    if (ret < 0) {
356
        return ret;
357
    }
358

    
359
    return get_refcount(bs, cluster_index);
360
}
361

    
362

    
363

    
364
/*********************************************************/
365
/* cluster allocation functions */
366

    
367

    
368

    
369
/* return < 0 if error */
370
/*
 * Find a run of free clusters (refcount == 0) big enough to hold size
 * bytes, scanning forward from s->free_cluster_index, and return the
 * offset of the first one.  Refcounts are NOT updated (hence "noref");
 * the caller is responsible for that.
 */
static int64_t alloc_clusters_noref(BlockDriverState *bs, int64_t size)
{
    BDRVQcowState *s = bs->opaque;
    int i, nb_clusters;

    nb_clusters = size_to_clusters(s, size);
retry:
    for(i = 0; i < nb_clusters; i++) {
        /* Fix: this inner variable used to be named 'i' as well, shadowing
         * the loop counter — legal but confusing and a -Wshadow warning. */
        int64_t cluster_index = s->free_cluster_index++;
        /* any allocated cluster restarts the search for a contiguous run */
        if (get_refcount(bs, cluster_index) != 0)
            goto retry;
    }
#ifdef DEBUG_ALLOC2
    printf("alloc_clusters: size=%lld -> %lld\n",
            size,
            (s->free_cluster_index - nb_clusters) << s->cluster_bits);
#endif
    return (s->free_cluster_index - nb_clusters) << s->cluster_bits;
}
389

    
390
/*
 * Allocate clusters for size bytes and set their refcounts to 1.
 *
 * Returns the offset of the first allocated cluster, or a negative error
 * code if the refcount update failed.
 */
int64_t qcow2_alloc_clusters(BlockDriverState *bs, int64_t size)
{
    int64_t offset;
    int ret;

    offset = alloc_clusters_noref(bs, size);

    /* Fix: the update_refcount() result used to be discarded, silently
     * leaving freshly allocated clusters with a zero refcount on error. */
    ret = update_refcount(bs, offset, size, 1);
    if (ret < 0) {
        return ret;
    }

    return offset;
}
398

    
399
/* only used to allocate compressed sectors. We try to allocate
400
   contiguous sectors. size must be <= cluster_size */
401
/* only used to allocate compressed sectors. We try to allocate
   contiguous sectors. size must be <= cluster_size */
/*
 * Byte-granular allocator: hands out size bytes, packing consecutive
 * requests into the same cluster when possible.  s->free_byte_offset
 * tracks the next free byte (0 = no partially-used cluster).
 *
 * Returns the offset of the allocated bytes.
 */
int64_t qcow2_alloc_bytes(BlockDriverState *bs, int size)
{
    BDRVQcowState *s = bs->opaque;
    int64_t offset, cluster_offset;
    int free_in_cluster;

    assert(size > 0 && size <= s->cluster_size);
    if (s->free_byte_offset == 0) {
        s->free_byte_offset = qcow2_alloc_clusters(bs, s->cluster_size);
    }
 redo:
    /* bytes remaining in the cluster containing free_byte_offset */
    free_in_cluster = s->cluster_size -
        (s->free_byte_offset & (s->cluster_size - 1));
    if (size <= free_in_cluster) {
        /* enough space in current cluster */
        offset = s->free_byte_offset;
        s->free_byte_offset += size;
        free_in_cluster -= size;
        if (free_in_cluster == 0)
            s->free_byte_offset = 0;
        /* mid-cluster allocations add another reference to the cluster;
         * the first (cluster-aligned) allocation already counted it via
         * qcow2_alloc_clusters() */
        if ((offset & (s->cluster_size - 1)) != 0)
            update_cluster_refcount(bs, offset >> s->cluster_bits, 1);
    } else {
        /* current cluster too small: get a fresh one */
        offset = qcow2_alloc_clusters(bs, s->cluster_size);
        cluster_offset = s->free_byte_offset & ~(s->cluster_size - 1);
        if ((cluster_offset + s->cluster_size) == offset) {
            /* we are lucky: contiguous data */
            offset = s->free_byte_offset;
            update_cluster_refcount(bs, offset >> s->cluster_bits, 1);
            s->free_byte_offset += size;
        } else {
            /* not contiguous: restart from the new cluster (the tail of
             * the old partially-used cluster is abandoned) */
            s->free_byte_offset = offset;
            goto redo;
        }
    }
    return offset;
}
438

    
439
/*
 * Decrement the refcount of every cluster overlapping [offset,
 * offset + size).
 *
 * NOTE(review): the update_refcount() result is discarded, so I/O errors
 * while freeing go unreported.
 */
void qcow2_free_clusters(BlockDriverState *bs,
                          int64_t offset, int64_t size)
{
    update_refcount(bs, offset, size, -1);
}
444

    
445
/*
446
 * free_any_clusters
447
 *
448
 * free clusters according to its type: compressed or not
449
 *
450
 */
451

    
452
/*
 * Free a cluster run described by an L2 entry, handling both compressed
 * and normal clusters.
 *
 * Compressed entries encode their sector count in the offset bits, so the
 * freed range is derived from the entry itself; normal entries free
 * nb_clusters whole clusters.
 */
void qcow2_free_any_clusters(BlockDriverState *bs,
    uint64_t cluster_offset, int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    int nb_csectors;

    if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
        /* sector count is stored in the entry, biased by one */
        nb_csectors = ((cluster_offset >> s->csize_shift) &
                       s->csize_mask) + 1;
        qcow2_free_clusters(bs,
            (cluster_offset & s->cluster_offset_mask) & ~511,
            nb_csectors * 512);
    } else {
        qcow2_free_clusters(bs, cluster_offset,
                            nb_clusters << s->cluster_bits);
    }
}
473

    
474

    
475

    
476
/*********************************************************/
477
/* snapshots and image creation */
478

    
479

    
480

    
481
/*
 * During image creation, bump the refcount of every cluster overlapping
 * the byte range [offset, offset + size) in the in-memory refcount block
 * (stored big-endian, as on disk).
 */
void qcow2_create_refcount_update(QCowCreateState *s, int64_t offset,
    int64_t size)
{
    int64_t first, last, cluster;
    uint16_t *entry;

    /* cluster-align the range */
    first = offset & ~(s->cluster_size - 1);
    last = (offset + size - 1) & ~(s->cluster_size - 1);

    for (cluster = first; cluster <= last; cluster += s->cluster_size) {
        entry = &s->refcount_block[cluster >> s->cluster_bits];
        *entry = cpu_to_be16(be16_to_cpu(*entry) + 1);
    }
}
498

    
499
/* update the refcounts of snapshots and the copied flag */
500
/* update the refcounts of snapshots and the copied flag */
/*
 * Walk the L1/L2 tables starting at l1_table_offset, adjusting the
 * refcount of every referenced cluster (data and L2 tables) by addend,
 * and recompute the QCOW_OFLAG_COPIED flag of each entry (set iff the
 * resulting refcount is 1).  With addend == 0 only the COPIED flags are
 * refreshed.  Modified L2 tables and the L1 table are written back.
 *
 * Refcount writes are batched via cache_refcount_updates for the whole
 * walk and flushed at the end (both success and failure paths).
 *
 * Returns 0 on success, -EIO on any read/write failure.
 */
int qcow2_update_snapshot_refcount(BlockDriverState *bs,
    int64_t l1_table_offset, int l1_size, int addend)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table, *l2_table, l2_offset, offset, l1_size2, l1_allocated;
    int64_t old_offset, old_l2_offset;
    int l2_size, i, j, l1_modified, l2_modified, nb_csectors, refcount;

    /* the L2 cache may alias tables we are about to rewrite */
    qcow2_l2_cache_reset(bs);
    cache_refcount_updates = 1;

    l2_table = NULL;
    l1_table = NULL;
    l1_size2 = l1_size * sizeof(uint64_t);
    l1_allocated = 0;
    /* a snapshot's L1 table must be read from disk; the active one is
     * already in memory */
    if (l1_table_offset != s->l1_table_offset) {
        l1_table = qemu_mallocz(align_offset(l1_size2, 512));
        l1_allocated = 1;
        if (bdrv_pread(s->hd, l1_table_offset,
                       l1_table, l1_size2) != l1_size2)
            goto fail;
        for(i = 0;i < l1_size; i++)
            be64_to_cpus(&l1_table[i]);
    } else {
        assert(l1_size == s->l1_size);
        l1_table = s->l1_table;
        l1_allocated = 0;
    }

    l2_size = s->l2_size * sizeof(uint64_t);
    l2_table = qemu_malloc(l2_size);
    l1_modified = 0;
    for(i = 0; i < l1_size; i++) {
        l2_offset = l1_table[i];
        if (l2_offset) {
            old_l2_offset = l2_offset;
            l2_offset &= ~QCOW_OFLAG_COPIED;
            l2_modified = 0;
            if (bdrv_pread(s->hd, l2_offset, l2_table, l2_size) != l2_size)
                goto fail;
            for(j = 0; j < s->l2_size; j++) {
                offset = be64_to_cpu(l2_table[j]);
                if (offset != 0) {
                    old_offset = offset;
                    offset &= ~QCOW_OFLAG_COPIED;
                    if (offset & QCOW_OFLAG_COMPRESSED) {
                        nb_csectors = ((offset >> s->csize_shift) &
                                       s->csize_mask) + 1;
                        if (addend != 0)
                            update_refcount(bs, (offset & s->cluster_offset_mask) & ~511,
                                            nb_csectors * 512, addend);
                        /* compressed clusters are never modified */
                        refcount = 2;
                    } else {
                        if (addend != 0) {
                            refcount = update_cluster_refcount(bs, offset >> s->cluster_bits, addend);
                        } else {
                            refcount = get_refcount(bs, offset >> s->cluster_bits);
                        }
                    }

                    /* COPIED means "only one reference, safe to write" */
                    if (refcount == 1) {
                        offset |= QCOW_OFLAG_COPIED;
                    }
                    if (offset != old_offset) {
                        l2_table[j] = cpu_to_be64(offset);
                        l2_modified = 1;
                    }
                }
            }
            if (l2_modified) {
                if (bdrv_pwrite(s->hd,
                                l2_offset, l2_table, l2_size) != l2_size)
                    goto fail;
            }

            /* the L2 table's own cluster is refcounted too */
            if (addend != 0) {
                refcount = update_cluster_refcount(bs, l2_offset >> s->cluster_bits, addend);
            } else {
                refcount = get_refcount(bs, l2_offset >> s->cluster_bits);
            }
            if (refcount == 1) {
                l2_offset |= QCOW_OFLAG_COPIED;
            }
            if (l2_offset != old_l2_offset) {
                l1_table[i] = l2_offset;
                l1_modified = 1;
            }
        }
    }
    if (l1_modified) {
        /* write back big-endian, then restore CPU order in memory */
        for(i = 0; i < l1_size; i++)
            cpu_to_be64s(&l1_table[i]);
        if (bdrv_pwrite(s->hd, l1_table_offset, l1_table,
                        l1_size2) != l1_size2)
            goto fail;
        for(i = 0; i < l1_size; i++)
            be64_to_cpus(&l1_table[i]);
    }
    if (l1_allocated)
        qemu_free(l1_table);
    qemu_free(l2_table);
    cache_refcount_updates = 0;
    /* flush the refcount updates batched above */
    write_refcount_block(s);
    return 0;
 fail:
    if (l1_allocated)
        qemu_free(l1_table);
    qemu_free(l2_table);
    cache_refcount_updates = 0;
    write_refcount_block(s);
    return -EIO;
}
613

    
614

    
615

    
616

    
617
/*********************************************************/
618
/* refcount checking functions */
619

    
620

    
621

    
622
/*
623
 * Increases the refcount for a range of clusters in a given refcount table.
624
 * This is used to construct a temporary refcount table out of L1 and L2 tables
625
 * which can be compared to the refcount table saved in the image.
626
 *
627
 * Returns the number of errors in the image that were found
628
 */
629
/*
 * Bump the entry of every cluster overlapping [offset, offset + size)
 * in the externally supplied refcount_table (used by the consistency
 * checker to rebuild expected refcounts).
 *
 * Out-of-range clusters and 16-bit counter overflows are reported to
 * stderr and counted.  Returns the number of errors found.
 */
static int inc_refcounts(BlockDriverState *bs,
                          uint16_t *refcount_table,
                          int refcount_table_size,
                          int64_t offset, int64_t size)
{
    BDRVQcowState *s = bs->opaque;
    int64_t first_cluster, last_cluster, current;
    int errors = 0;

    if (size <= 0) {
        return 0;
    }

    /* cluster-align the range */
    first_cluster = offset & ~(s->cluster_size - 1);
    last_cluster = (offset + size - 1) & ~(s->cluster_size - 1);

    for (current = first_cluster; current <= last_cluster;
         current += s->cluster_size) {
        int index = current >> s->cluster_bits;

        if (index < 0 || index >= refcount_table_size) {
            fprintf(stderr, "ERROR: invalid cluster offset=0x%" PRIx64 "\n",
                current);
            errors++;
            continue;
        }

        refcount_table[index]++;
        if (refcount_table[index] == 0) {
            /* the 16-bit counter wrapped around */
            fprintf(stderr, "ERROR: overflow cluster offset=0x%" PRIx64
                "\n", current);
            errors++;
        }
    }

    return errors;
}
662

    
663
/*
664
 * Increases the refcount in the given refcount table for all clusters
665
 * referenced in the L2 table. While doing so, performs some checks on L2
666
 * entries.
667
 *
668
 * Returns the number of errors found by the checks or -errno if an internal
669
 * error occurred.
670
 */
671
/*
 * Read one L2 table and bump the entries of every referenced cluster in
 * refcount_table, validating each L2 entry along the way (COPIED flag on
 * compressed clusters, COPIED vs. actual refcount when check_copied is
 * set, cluster alignment).
 *
 * Returns the number of errors found, or -EIO if the L2 table could not
 * be read.
 */
static int check_refcounts_l2(BlockDriverState *bs,
    uint16_t *refcount_table, int refcount_table_size, int64_t l2_offset,
    int check_copied)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table, offset;
    int i, l2_size, nb_csectors, refcount;
    int errors = 0;

    /* Read L2 table from disk */
    l2_size = s->l2_size * sizeof(uint64_t);
    l2_table = qemu_malloc(l2_size);

    if (bdrv_pread(s->hd, l2_offset, l2_table, l2_size) != l2_size)
        goto fail;

    /* Do the actual checks */
    for(i = 0; i < s->l2_size; i++) {
        offset = be64_to_cpu(l2_table[i]);
        if (offset != 0) {
            if (offset & QCOW_OFLAG_COMPRESSED) {
                /* Compressed clusters don't have QCOW_OFLAG_COPIED */
                if (offset & QCOW_OFLAG_COPIED) {
                    fprintf(stderr, "ERROR: cluster %" PRId64 ": "
                        "copied flag must never be set for compressed "
                        "clusters\n", offset >> s->cluster_bits);
                    offset &= ~QCOW_OFLAG_COPIED;
                    errors++;
                }

                /* Mark cluster as used */
                nb_csectors = ((offset >> s->csize_shift) &
                               s->csize_mask) + 1;
                offset &= s->cluster_offset_mask;
                errors += inc_refcounts(bs, refcount_table,
                              refcount_table_size,
                              offset & ~511, nb_csectors * 512);
            } else {
                /* QCOW_OFLAG_COPIED must be set iff refcount == 1 */
                if (check_copied) {
                    uint64_t entry = offset;
                    offset &= ~QCOW_OFLAG_COPIED;
                    refcount = get_refcount(bs, offset >> s->cluster_bits);
                    if ((refcount == 1) != ((entry & QCOW_OFLAG_COPIED) != 0)) {
                        fprintf(stderr, "ERROR OFLAG_COPIED: offset=%"
                            PRIx64 " refcount=%d\n", entry, refcount);
                        errors++;
                    }
                }

                /* Mark cluster as used */
                offset &= ~QCOW_OFLAG_COPIED;
                errors += inc_refcounts(bs, refcount_table,
                              refcount_table_size,
                              offset, s->cluster_size);

                /* Correct offsets are cluster aligned */
                if (offset & (s->cluster_size - 1)) {
                    fprintf(stderr, "ERROR offset=%" PRIx64 ": Cluster is not "
                        "properly aligned; L2 entry corrupted.\n", offset);
                    errors++;
                }
            }
        }
    }

    qemu_free(l2_table);
    return errors;

fail:
    /* Fix: this message previously blamed check_refcounts_l1 */
    fprintf(stderr, "ERROR: I/O error in check_refcounts_l2\n");
    qemu_free(l2_table);
    return -EIO;
}
745

    
746
/*
747
 * Increases the refcount for the L1 table, its L2 tables and all referenced
748
 * clusters in the given refcount table. While doing so, performs some checks
749
 * on L1 and L2 entries.
750
 *
751
 * Returns the number of errors found by the checks or -errno if an internal
752
 * error occurred.
753
 */
754
/*
 * Account for the L1 table itself, each L2 table it references, and (via
 * check_refcounts_l2) every data cluster, in the checker's refcount_table.
 * Validates COPIED flags (when check_copied is set) and L2 alignment.
 *
 * Returns the number of errors found, or -EIO on a read failure.
 */
static int check_refcounts_l1(BlockDriverState *bs,
                              uint16_t *refcount_table,
                              int refcount_table_size,
                              int64_t l1_table_offset, int l1_size,
                              int check_copied)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table, l2_offset, l1_size2;
    int i, refcount, ret;
    int errors = 0;

    l1_size2 = l1_size * sizeof(uint64_t);

    /* Mark L1 table as used */
    errors += inc_refcounts(bs, refcount_table, refcount_table_size,
                  l1_table_offset, l1_size2);

    /* Read L1 table entries from disk */
    l1_table = qemu_malloc(l1_size2);
    if (bdrv_pread(s->hd, l1_table_offset,
                   l1_table, l1_size2) != l1_size2)
        goto fail;
    /* entries are stored big-endian on disk */
    for(i = 0;i < l1_size; i++)
        be64_to_cpus(&l1_table[i]);

    /* Do the actual checks */
    for(i = 0; i < l1_size; i++) {
        l2_offset = l1_table[i];
        if (l2_offset) {
            /* QCOW_OFLAG_COPIED must be set iff refcount == 1 */
            if (check_copied) {
                refcount = get_refcount(bs, (l2_offset & ~QCOW_OFLAG_COPIED)
                    >> s->cluster_bits);
                if ((refcount == 1) != ((l2_offset & QCOW_OFLAG_COPIED) != 0)) {
                    fprintf(stderr, "ERROR OFLAG_COPIED: l2_offset=%" PRIx64
                        " refcount=%d\n", l2_offset, refcount);
                    errors++;
                }
            }

            /* Mark L2 table as used */
            l2_offset &= ~QCOW_OFLAG_COPIED;
            errors += inc_refcounts(bs, refcount_table,
                          refcount_table_size,
                          l2_offset,
                          s->cluster_size);

            /* L2 tables are cluster aligned */
            if (l2_offset & (s->cluster_size - 1)) {
                fprintf(stderr, "ERROR l2_offset=%" PRIx64 ": Table is not "
                    "cluster aligned; L1 entry corrupted\n", l2_offset);
                errors++;
            }

            /* Process and check L2 entries */
            ret = check_refcounts_l2(bs, refcount_table, refcount_table_size,
                l2_offset, check_copied);
            if (ret < 0) {
                goto fail;
            }
            errors += ret;
        }
    }
    qemu_free(l1_table);
    return errors;

fail:
    fprintf(stderr, "ERROR: I/O error in check_refcounts_l1\n");
    qemu_free(l1_table);
    return -EIO;
}
825

    
826
/*
827
 * Checks an image for refcount consistency.
828
 *
829
 * Returns 0 if no errors are found, the number of errors in case the image is
830
 * detected as corrupted, and -errno when an internal error occurred.
831
 */
832
/*
 * Rebuild expected refcounts from the image metadata (header, active L1,
 * snapshots, snapshot table, refcount structures) and compare them with
 * the refcounts actually stored in the image.
 *
 * Returns 0 if consistent, the number of errors found if corrupted, or
 * -errno on an internal error.
 */
int qcow2_check_refcounts(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    int64_t size;
    int nb_clusters, refcount1, refcount2, i;
    QCowSnapshot *sn;
    uint16_t *refcount_table;
    int ret, errors = 0;

    size = bdrv_getlength(s->hd);
    nb_clusters = size_to_clusters(s, size);
    /* one expected 16-bit refcount per cluster of the image */
    refcount_table = qemu_mallocz(nb_clusters * sizeof(uint16_t));

    /* header */
    errors += inc_refcounts(bs, refcount_table, nb_clusters,
                  0, s->cluster_size);

    /* current L1 table */
    ret = check_refcounts_l1(bs, refcount_table, nb_clusters,
                       s->l1_table_offset, s->l1_size, 1);
    if (ret < 0) {
        /* Fix: refcount_table used to leak on this early return */
        qemu_free(refcount_table);
        return ret;
    }
    errors += ret;

    /* snapshots */
    for(i = 0; i < s->nb_snapshots; i++) {
        sn = s->snapshots + i;
        /* NOTE(review): errors (and -errno) from the snapshot L1 checks
         * are discarded here, so they are not reflected in the result. */
        check_refcounts_l1(bs, refcount_table, nb_clusters,
                           sn->l1_table_offset, sn->l1_size, 0);
    }
    errors += inc_refcounts(bs, refcount_table, nb_clusters,
                  s->snapshots_offset, s->snapshots_size);

    /* refcount data */
    errors += inc_refcounts(bs, refcount_table, nb_clusters,
                  s->refcount_table_offset,
                  s->refcount_table_size * sizeof(uint64_t));
    for(i = 0; i < s->refcount_table_size; i++) {
        int64_t offset;
        offset = s->refcount_table[i];
        if (offset != 0) {
            errors += inc_refcounts(bs, refcount_table, nb_clusters,
                          offset, s->cluster_size);
        }
    }

    /* compare ref counts */
    for(i = 0; i < nb_clusters; i++) {
        refcount1 = get_refcount(bs, i);
        refcount2 = refcount_table[i];
        if (refcount1 != refcount2) {
            fprintf(stderr, "ERROR cluster %d refcount=%d reference=%d\n",
                   i, refcount1, refcount2);
            errors++;
        }
    }

    qemu_free(refcount_table);

    return errors;
}
894