Revision eb9566d1 — diff of block/vdi.c (old revision vs. new revision)
474 474
    uint32_t block_index;
475 475
    uint32_t sector_in_block;
476 476
    uint32_t n_sectors;
477
    int ret;
477
    int ret = 0;
478 478

  
479 479
    logout("\n");
480 480

  
481
restart:
482
    block_index = sector_num / s->block_sectors;
483
    sector_in_block = sector_num % s->block_sectors;
484
    n_sectors = s->block_sectors - sector_in_block;
485
    if (n_sectors > nb_sectors) {
486
        n_sectors = nb_sectors;
487
    }
488

  
489
    logout("will read %u sectors starting at sector %" PRIu64 "\n",
490
           n_sectors, sector_num);
481
    while (ret >= 0 && nb_sectors > 0) {
482
        block_index = sector_num / s->block_sectors;
483
        sector_in_block = sector_num % s->block_sectors;
484
        n_sectors = s->block_sectors - sector_in_block;
485
        if (n_sectors > nb_sectors) {
486
            n_sectors = nb_sectors;
487
        }
491 488

  
492
    /* prepare next AIO request */
493
    bmap_entry = le32_to_cpu(s->bmap[block_index]);
494
    if (!VDI_IS_ALLOCATED(bmap_entry)) {
495
        /* Block not allocated, return zeros, no need to wait. */
496
        memset(buf, 0, n_sectors * SECTOR_SIZE);
497
        ret = 0;
498
    } else {
499
        uint64_t offset = s->header.offset_data / SECTOR_SIZE +
500
                          (uint64_t)bmap_entry * s->block_sectors +
501
                          sector_in_block;
502
        ret = bdrv_read(bs->file, offset, buf, n_sectors);
503
    }
504
    logout("%u sectors read\n", n_sectors);
489
        logout("will read %u sectors starting at sector %" PRIu64 "\n",
490
               n_sectors, sector_num);
505 491

  
506
    nb_sectors -= n_sectors;
507
    sector_num += n_sectors;
508
    buf += n_sectors * SECTOR_SIZE;
492
        /* prepare next AIO request */
493
        bmap_entry = le32_to_cpu(s->bmap[block_index]);
494
        if (!VDI_IS_ALLOCATED(bmap_entry)) {
495
            /* Block not allocated, return zeros, no need to wait. */
496
            memset(buf, 0, n_sectors * SECTOR_SIZE);
497
            ret = 0;
498
        } else {
499
            uint64_t offset = s->header.offset_data / SECTOR_SIZE +
500
                              (uint64_t)bmap_entry * s->block_sectors +
501
                              sector_in_block;
502
            ret = bdrv_read(bs->file, offset, buf, n_sectors);
503
        }
504
        logout("%u sectors read\n", n_sectors);
509 505

  
510
    if (ret >= 0 && nb_sectors > 0) {
511
        goto restart;
506
        nb_sectors -= n_sectors;
507
        sector_num += n_sectors;
508
        buf += n_sectors * SECTOR_SIZE;
512 509
    }
513 510

  
514 511
    return ret;
......
525 522
    uint32_t bmap_first = VDI_UNALLOCATED;
526 523
    uint32_t bmap_last = VDI_UNALLOCATED;
527 524
    uint8_t *block = NULL;
528
    int ret;
525
    int ret = 0;
529 526

  
530 527
    logout("\n");
531 528

  
532
restart:
533
    block_index = sector_num / s->block_sectors;
534
    sector_in_block = sector_num % s->block_sectors;
535
    n_sectors = s->block_sectors - sector_in_block;
536
    if (n_sectors > nb_sectors) {
537
        n_sectors = nb_sectors;
538
    }
539

  
540
    logout("will write %u sectors starting at sector %" PRIu64 "\n",
541
           n_sectors, sector_num);
529
    while (ret >= 0 && nb_sectors > 0) {
530
        block_index = sector_num / s->block_sectors;
531
        sector_in_block = sector_num % s->block_sectors;
532
        n_sectors = s->block_sectors - sector_in_block;
533
        if (n_sectors > nb_sectors) {
534
            n_sectors = nb_sectors;
535
        }
542 536

  
543
    /* prepare next AIO request */
544
    bmap_entry = le32_to_cpu(s->bmap[block_index]);
545
    if (!VDI_IS_ALLOCATED(bmap_entry)) {
546
        /* Allocate new block and write to it. */
547
        uint64_t offset;
548
        bmap_entry = s->header.blocks_allocated;
549
        s->bmap[block_index] = cpu_to_le32(bmap_entry);
550
        s->header.blocks_allocated++;
551
        offset = s->header.offset_data / SECTOR_SIZE +
552
                 (uint64_t)bmap_entry * s->block_sectors;
553
        if (block == NULL) {
554
            block = g_malloc(s->block_size);
555
            bmap_first = block_index;
537
        logout("will write %u sectors starting at sector %" PRIu64 "\n",
538
               n_sectors, sector_num);
539

  
540
        /* prepare next AIO request */
541
        bmap_entry = le32_to_cpu(s->bmap[block_index]);
542
        if (!VDI_IS_ALLOCATED(bmap_entry)) {
543
            /* Allocate new block and write to it. */
544
            uint64_t offset;
545
            bmap_entry = s->header.blocks_allocated;
546
            s->bmap[block_index] = cpu_to_le32(bmap_entry);
547
            s->header.blocks_allocated++;
548
            offset = s->header.offset_data / SECTOR_SIZE +
549
                     (uint64_t)bmap_entry * s->block_sectors;
550
            if (block == NULL) {
551
                block = g_malloc(s->block_size);
552
                bmap_first = block_index;
553
            }
554
            bmap_last = block_index;
555
            /* Copy data to be written to new block and zero unused parts. */
556
            memset(block, 0, sector_in_block * SECTOR_SIZE);
557
            memcpy(block + sector_in_block * SECTOR_SIZE,
558
                   buf, n_sectors * SECTOR_SIZE);
559
            memset(block + (sector_in_block + n_sectors) * SECTOR_SIZE, 0,
560
                   (s->block_sectors - n_sectors - sector_in_block) * SECTOR_SIZE);
561
            ret = bdrv_write(bs->file, offset, block, s->block_sectors);
562
        } else {
563
            uint64_t offset = s->header.offset_data / SECTOR_SIZE +
564
                              (uint64_t)bmap_entry * s->block_sectors +
565
                              sector_in_block;
566
            ret = bdrv_write(bs->file, offset, buf, n_sectors);
556 567
        }
557
        bmap_last = block_index;
558
        /* Copy data to be written to new block and zero unused parts. */
559
        memset(block, 0, sector_in_block * SECTOR_SIZE);
560
        memcpy(block + sector_in_block * SECTOR_SIZE,
561
               buf, n_sectors * SECTOR_SIZE);
562
        memset(block + (sector_in_block + n_sectors) * SECTOR_SIZE, 0,
563
               (s->block_sectors - n_sectors - sector_in_block) * SECTOR_SIZE);
564
        ret = bdrv_write(bs->file, offset, block, s->block_sectors);
565
    } else {
566
        uint64_t offset = s->header.offset_data / SECTOR_SIZE +
567
                          (uint64_t)bmap_entry * s->block_sectors +
568
                          sector_in_block;
569
        ret = bdrv_write(bs->file, offset, buf, n_sectors);
570
    }
571 568

  
572
    nb_sectors -= n_sectors;
573
    sector_num += n_sectors;
574
    buf += n_sectors * SECTOR_SIZE;
569
        nb_sectors -= n_sectors;
570
        sector_num += n_sectors;
571
        buf += n_sectors * SECTOR_SIZE;
575 572

  
576
    logout("%u sectors written\n", n_sectors);
577
    if (ret >= 0 && nb_sectors > 0) {
578
        goto restart;
573
        logout("%u sectors written\n", n_sectors);
579 574
    }
580 575

  
581 576
    logout("finished data write\n");

Also available in: Unified diff