Revision c794b4e0 block/vdi.c

--- a/block/vdi.c
+++ b/block/vdi.c
@@ -114,8 +114,13 @@
  */
 #define VDI_TEXT "<<< QEMU VM Virtual Disk Image >>>\n"
 
-/* Unallocated blocks use this index (no need to convert endianness). */
-#define VDI_UNALLOCATED UINT32_MAX
+/* A never-allocated block; semantically arbitrary content. */
+#define VDI_UNALLOCATED 0xffffffffU
+
+/* A discarded (no longer allocated) block; semantically zero-filled. */
+#define VDI_DISCARDED   0xfffffffeU
+
+#define VDI_IS_ALLOCATED(X) ((X) < VDI_DISCARDED)
 
 #if !defined(CONFIG_UUID)
 void uuid_generate(uuid_t out)
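
Note: the reason a plain != VDI_UNALLOCATED test no longer suffices is that a bmap entry can now be VDI_DISCARDED, i.e. "not unallocated" yet still backed by no data block. The range check < VDI_DISCARDED covers both sentinels. A small standalone check of the definitions above (not driver code):

#include <assert.h>
#include <stdint.h>

#define VDI_UNALLOCATED 0xffffffffU
#define VDI_DISCARDED   0xfffffffeU
#define VDI_IS_ALLOCATED(X) ((X) < VDI_DISCARDED)

int main(void)
{
    uint32_t discarded = VDI_DISCARDED;

    /* Old test: a discarded block would still look "allocated". */
    assert(discarded != VDI_UNALLOCATED);

    /* New test: discarded and unallocated both mean "no data block". */
    assert(!VDI_IS_ALLOCATED(discarded));
    assert(!VDI_IS_ALLOCATED(VDI_UNALLOCATED));

    /* Any real data block index still counts as allocated. */
    assert(VDI_IS_ALLOCATED(0u) && VDI_IS_ALLOCATED(12345u));
    return 0;
}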
@@ -307,10 +312,10 @@
     /* Check block map and value of blocks_allocated. */
     for (block = 0; block < s->header.blocks_in_image; block++) {
         uint32_t bmap_entry = le32_to_cpu(s->bmap[block]);
-        if (bmap_entry != VDI_UNALLOCATED) {
+        if (VDI_IS_ALLOCATED(bmap_entry)) {
             if (bmap_entry < s->header.blocks_in_image) {
                 blocks_allocated++;
-                if (bmap[bmap_entry] == VDI_UNALLOCATED) {
+                if (!VDI_IS_ALLOCATED(bmap[bmap_entry])) {
                     bmap[bmap_entry] = bmap_entry;
                 } else {
                     fprintf(stderr, "ERROR: block index %" PRIu32
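
The consistency check above counts allocated blocks and uses a second map, pre-filled with VDI_UNALLOCATED, to spot two bmap entries that reference the same data block. A minimal standalone sketch of that idea, assuming entries already converted to host byte order (check_bmap_sketch and its messages are illustrative, not driver code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define VDI_UNALLOCATED 0xffffffffU
#define VDI_DISCARDED   0xfffffffeU
#define VDI_IS_ALLOCATED(X) ((X) < VDI_DISCARDED)

/* Count allocated data blocks and flag data blocks referenced twice. */
static bool check_bmap_sketch(const uint32_t *bmap, uint32_t blocks_in_image)
{
    uint32_t *seen = malloc(blocks_in_image * sizeof(*seen));
    uint32_t blocks_allocated = 0;
    bool ok = true;

    if (seen == NULL) {
        return false;
    }
    for (uint32_t i = 0; i < blocks_in_image; i++) {
        seen[i] = VDI_UNALLOCATED;
    }
    for (uint32_t block = 0; block < blocks_in_image; block++) {
        uint32_t bmap_entry = bmap[block];
        if (VDI_IS_ALLOCATED(bmap_entry)) {
            if (bmap_entry < blocks_in_image) {
                blocks_allocated++;
                if (!VDI_IS_ALLOCATED(seen[bmap_entry])) {
                    seen[bmap_entry] = bmap_entry;
                } else {
                    printf("data block %u referenced twice\n",
                           (unsigned)bmap_entry);
                    ok = false;
                }
            } else {
                printf("entry %u out of range\n", (unsigned)bmap_entry);
                ok = false;
            }
        }
    }
    printf("%u blocks allocated\n", (unsigned)blocks_allocated);
    free(seen);
    return ok;
}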
@@ -472,7 +477,7 @@
         n_sectors = nb_sectors;
     }
     *pnum = n_sectors;
-    return bmap_entry != VDI_UNALLOCATED;
+    return VDI_IS_ALLOCATED(bmap_entry);
 }
 
 static void vdi_aio_cancel(BlockDriverAIOCB *blockacb)
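
With this change the is_allocated callback also reports discarded blocks as unallocated, which is what callers walking an image expect: *pnum gives the length of a homogeneous run that can be copied or skipped as a whole. A rough caller-side sketch, assuming a sector-based callback of this shape (walk_image and is_allocated_fn are illustrative, not QEMU API):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative callback shape: returns nonzero if the run starting at
 * sector_num is allocated and stores the run length (> 0) in *pnum. */
typedef int (*is_allocated_fn)(void *opaque, int64_t sector_num,
                               int nb_sectors, int *pnum);

/* Walk a whole image in homogeneous runs, e.g. to copy only real data. */
static void walk_image(void *opaque, int64_t total_sectors,
                       is_allocated_fn is_allocated)
{
    int64_t sector = 0;
    while (sector < total_sectors) {
        int pnum = 0;
        int remaining = (int)(total_sectors - sector > INT32_MAX
                              ? INT32_MAX : total_sectors - sector);
        int allocated = is_allocated(opaque, sector, remaining, &pnum);
        printf("sectors %" PRId64 "+%d: %s\n", sector, pnum,
               allocated ? "allocated" : "read as zeros");
        sector += pnum;
    }
}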
@@ -603,7 +608,7 @@
     /* prepare next AIO request */
     acb->n_sectors = n_sectors;
     bmap_entry = le32_to_cpu(s->bmap[block_index]);
-    if (bmap_entry == VDI_UNALLOCATED) {
+    if (!VDI_IS_ALLOCATED(bmap_entry)) {
         /* Block not allocated, return zeros, no need to wait. */
         memset(acb->buf, 0, n_sectors * SECTOR_SIZE);
         ret = vdi_schedule_bh(vdi_aio_rw_bh, acb);
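
On the read side, a block without a data block behind it (never allocated, or now also discarded) is satisfied by zero-filling the destination buffer instead of issuing I/O. A simplified sketch of that decision (read_block_sketch and read_data_block are invented names):

#include <stdint.h>
#include <string.h>

#define SECTOR_SIZE 512
#define VDI_DISCARDED   0xfffffffeU
#define VDI_IS_ALLOCATED(X) ((X) < VDI_DISCARDED)

/* Fill buf for n_sectors of one virtual block: zeros when there is no
 * data block, otherwise read from the data block named by bmap_entry. */
static int read_block_sketch(uint32_t bmap_entry, uint8_t *buf, int n_sectors,
                             int (*read_data_block)(uint32_t data_block,
                                                    uint8_t *buf,
                                                    int n_sectors))
{
    if (!VDI_IS_ALLOCATED(bmap_entry)) {
        memset(buf, 0, (size_t)n_sectors * SECTOR_SIZE);
        return 0;
    }
    return read_data_block(bmap_entry, buf, n_sectors);
}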
@@ -685,7 +690,7 @@
         if (acb->header_modified) {
             VdiHeader *header = acb->block_buffer;
             logout("now writing modified header\n");
-            assert(acb->bmap_first != VDI_UNALLOCATED);
+            assert(VDI_IS_ALLOCATED(acb->bmap_first));
             *header = s->header;
             vdi_header_to_le(header);
             acb->header_modified = 0;
@@ -699,7 +704,7 @@
                 goto done;
             }
             return;
-        } else if (acb->bmap_first != VDI_UNALLOCATED) {
+        } else if (VDI_IS_ALLOCATED(acb->bmap_first)) {
             /* One or more new blocks were allocated. */
             uint64_t offset;
             uint32_t bmap_first;
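
acb->bmap_first doubles as a flag here: it starts out as the VDI_UNALLOCATED sentinel and only becomes a valid bmap index once the request allocates a new block, so VDI_IS_ALLOCATED(acb->bmap_first) means "at least one block was allocated by this request". A tiny sketch of that sentinel pattern (the request struct is illustrative, not the driver's own):

#include <stdbool.h>
#include <stdint.h>

#define VDI_UNALLOCATED 0xffffffffU
#define VDI_DISCARDED   0xfffffffeU
#define VDI_IS_ALLOCATED(X) ((X) < VDI_DISCARDED)

/* Illustrative per-request bookkeeping. */
struct write_request_sketch {
    uint32_t bmap_first;   /* first bmap index touched, or VDI_UNALLOCATED */
    uint32_t bmap_last;    /* last bmap index touched */
};

/* Record that block_index got a new data block during this request. */
static void note_new_block(struct write_request_sketch *req,
                           uint32_t block_index)
{
    if (!VDI_IS_ALLOCATED(req->bmap_first)) {
        req->bmap_first = block_index;   /* first allocation in this request */
    }
    req->bmap_last = block_index;
}

/* Same test the driver now applies to acb->bmap_first. */
static bool request_allocated_blocks(const struct write_request_sketch *req)
{
    return VDI_IS_ALLOCATED(req->bmap_first);
}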
@@ -749,7 +754,7 @@
     /* prepare next AIO request */
     acb->n_sectors = n_sectors;
     bmap_entry = le32_to_cpu(s->bmap[block_index]);
-    if (bmap_entry == VDI_UNALLOCATED) {
+    if (!VDI_IS_ALLOCATED(bmap_entry)) {
         /* Allocate new block and write to it. */
         uint64_t offset;
         uint8_t *block;
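
A write that hits a block with no data block yet has to pick a fresh data block index, record it in the bmap, and then write out a full block. A rough standalone sketch of just the allocation step, with details such as where the new data block lands in the image file left out (image_sketch and ensure_block_allocated are invented names, not the driver's code):

#include <stdint.h>

#define VDI_UNALLOCATED 0xffffffffU
#define VDI_DISCARDED   0xfffffffeU
#define VDI_IS_ALLOCATED(X) ((X) < VDI_DISCARDED)

/* Illustrative image state, not the driver's own struct. */
struct image_sketch {
    uint32_t *bmap;             /* one entry per virtual block */
    uint32_t blocks_allocated;  /* data blocks used so far */
};

/* Give the virtual block a data block if it has none yet and return the
 * data block index. */
static uint32_t ensure_block_allocated(struct image_sketch *img,
                                       uint32_t block_index)
{
    uint32_t bmap_entry = img->bmap[block_index];
    if (!VDI_IS_ALLOCATED(bmap_entry)) {
        bmap_entry = img->blocks_allocated++;
        img->bmap[block_index] = bmap_entry;
    }
    return bmap_entry;
}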
