Revision 05203524 block-qcow2.c

--- a/block-qcow2.c
+++ b/block-qcow2.c
@@ -606,22 +606,77 @@
     return l2_table;
 }
 
-static uint64_t get_cluster_offset(BlockDriverState *bs,
-                                   uint64_t offset, int allocate,
-                                   int compressed_size,
-                                   int n_start, int n_end)
+/*
+ * get_cluster_offset
+ *
+ * For a given offset of the disk image, return cluster offset in
+ * qcow2 file.
+ *
+ * Return 1, if the offset is found
+ * Return 0, otherwise.
+ *
+ */
+
+static uint64_t get_cluster_offset(BlockDriverState *bs, uint64_t offset)
+{
+    BDRVQcowState *s = bs->opaque;
+    int l1_index, l2_index;
+    uint64_t l2_offset, *l2_table, cluster_offset;
+
+    /* seek the the l2 offset in the l1 table */
+
+    l1_index = offset >> (s->l2_bits + s->cluster_bits);
+    if (l1_index >= s->l1_size)
+        return 0;
+
+    l2_offset = s->l1_table[l1_index];
+
+    /* seek the l2 table of the given l2 offset */
+
+    if (!l2_offset)
+        return 0;
+
+    /* load the l2 table in memory */
+
+    l2_offset &= ~QCOW_OFLAG_COPIED;
+    l2_table = l2_load(bs, l2_offset);
+    if (l2_table == NULL)
+        return 0;
+
+    /* find the cluster offset for the given disk offset */
+
+    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
+    cluster_offset = be64_to_cpu(l2_table[l2_index]);
+
+    return cluster_offset & ~QCOW_OFLAG_COPIED;
+}
+
+/*
+ * alloc_cluster_offset
+ *
+ * For a given offset of the disk image, return cluster offset in
+ * qcow2 file.
+ *
+ * If the offset is not found, allocate a new cluster.
+ *
+ * Return the cluster offset if successful,
+ * Return 0, otherwise.
+ *
+ */
+
+static uint64_t alloc_cluster_offset(BlockDriverState *bs,
+                                     uint64_t offset,
+                                     int compressed_size,
+                                     int n_start, int n_end)
 {
     BDRVQcowState *s = bs->opaque;
     int l1_index, l2_index, ret;
-    uint64_t l2_offset, *l2_table, cluster_offset, tmp;
+    uint64_t l2_offset, *l2_table, cluster_offset;
 
     /* seek the the l2 offset in the l1 table */
 
     l1_index = offset >> (s->l2_bits + s->cluster_bits);
     if (l1_index >= s->l1_size) {
-        /* outside l1 table is allowed: we grow the table if needed */
-        if (!allocate)
-            return 0;
         ret = grow_l1_table(bs, l1_index + 1);
         if (ret < 0)
             return 0;
@@ -630,44 +685,30 @@
 
     /* seek the l2 table of the given l2 offset */
 
-    if (!l2_offset) {
-        /* the l2 table doesn't exist */
-        if (!allocate)
+    if (l2_offset & QCOW_OFLAG_COPIED) {
+        /* load the l2 table in memory */
+        l2_offset &= ~QCOW_OFLAG_COPIED;
+        l2_table = l2_load(bs, l2_offset);
+        if (l2_table == NULL)
             return 0;
-        /* allocate a new l2 table for this offset */
+    } else {
+        if (l2_offset)
+            free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
         l2_table = l2_allocate(bs, l1_index);
         if (l2_table == NULL)
             return 0;
         l2_offset = s->l1_table[l1_index] & ~QCOW_OFLAG_COPIED;
-    } else {
-        /* the l2 table exists */
-        if (!(l2_offset & QCOW_OFLAG_COPIED) && allocate) {
-            /* duplicate the l2 table, and free the old table */
-            free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
-            l2_table = l2_allocate(bs, l1_index);
-            if (l2_table == NULL)
-                return 0;
-            l2_offset = s->l1_table[l1_index] & ~QCOW_OFLAG_COPIED;
-        } else {
-            /* load the l2 table in memory */
-            l2_offset &= ~QCOW_OFLAG_COPIED;
-            l2_table = l2_load(bs, l2_offset);
-            if (l2_table == NULL)
-                return 0;
-        }
     }
 
     /* find the cluster offset for the given disk offset */
 
     l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
     cluster_offset = be64_to_cpu(l2_table[l2_index]);
-    if (!cluster_offset) {
-        /* cluster doesn't exist */
-        if (!allocate)
-            return 0;
-    } else if (!(cluster_offset & QCOW_OFLAG_COPIED)) {
-        if (!allocate)
-            return cluster_offset;
+
+    if (cluster_offset & QCOW_OFLAG_COPIED)
+        return cluster_offset & ~QCOW_OFLAG_COPIED;
+
+    if (cluster_offset) {
         /* free the cluster */
         if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
             int nb_csectors;
@@ -678,46 +719,62 @@
         } else {
             free_clusters(bs, cluster_offset, s->cluster_size);
         }
-    } else {
-        cluster_offset &= ~QCOW_OFLAG_COPIED;
-        return cluster_offset;
     }
 
-    if (allocate == 1) {
-        /* allocate a new cluster */
-        cluster_offset = alloc_clusters(bs, s->cluster_size);
-
-        /* we must initialize the cluster content which won't be
-           written */
-        if ((n_end - n_start) < s->cluster_sectors) {
-            uint64_t start_sect;
-
-            start_sect = (offset & ~(s->cluster_size - 1)) >> 9;
-            ret = copy_sectors(bs, start_sect,
-                               cluster_offset, 0, n_start);
-            if (ret < 0)
-                return 0;
-            ret = copy_sectors(bs, start_sect,
-                               cluster_offset, n_end, s->cluster_sectors);
-            if (ret < 0)
-                return 0;
-        }
-        tmp = cpu_to_be64(cluster_offset | QCOW_OFLAG_COPIED);
-    } else {
+    if (compressed_size) {
         int nb_csectors;
+
         cluster_offset = alloc_bytes(bs, compressed_size);
         nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
-            (cluster_offset >> 9);
+                      (cluster_offset >> 9);
+
         cluster_offset |= QCOW_OFLAG_COMPRESSED |
-            ((uint64_t)nb_csectors << s->csize_shift);
+                          ((uint64_t)nb_csectors << s->csize_shift);
+
+        /* update L2 table */
+
         /* compressed clusters never have the copied flag */
-        tmp = cpu_to_be64(cluster_offset);
+
+        l2_table[l2_index] = cpu_to_be64(cluster_offset);
+        if (bdrv_pwrite(s->hd,
+                        l2_offset + l2_index * sizeof(uint64_t),
+                        l2_table + l2_index,
+                        sizeof(uint64_t)) != sizeof(uint64_t))
+            return 0;
+
+        return cluster_offset;
+    }
+
+    /* allocate a new cluster */
+
+    cluster_offset = alloc_clusters(bs, s->cluster_size);
+
+    /* we must initialize the cluster content which won't be
+       written */
+
+    if ((n_end - n_start) < s->cluster_sectors) {
+        uint64_t start_sect;
+
+        start_sect = (offset & ~(s->cluster_size - 1)) >> 9;
+        ret = copy_sectors(bs, start_sect,
+                           cluster_offset, 0, n_start);
+        if (ret < 0)
+            return 0;
+        ret = copy_sectors(bs, start_sect,
+                           cluster_offset, n_end, s->cluster_sectors);
+        if (ret < 0)
+            return 0;
     }
+
     /* update L2 table */
-    l2_table[l2_index] = tmp;
+
+    l2_table[l2_index] = cpu_to_be64(cluster_offset | QCOW_OFLAG_COPIED);
     if (bdrv_pwrite(s->hd,
-                    l2_offset + l2_index * sizeof(tmp), &tmp, sizeof(tmp)) != sizeof(tmp))
+                    l2_offset + l2_index * sizeof(uint64_t),
+                    l2_table + l2_index,
+                    sizeof(uint64_t)) != sizeof(uint64_t))
         return 0;
+
     return cluster_offset;
 }
 
@@ -728,7 +785,7 @@
     int index_in_cluster, n;
     uint64_t cluster_offset;
 
-    cluster_offset = get_cluster_offset(bs, sector_num << 9, 0, 0, 0, 0);
+    cluster_offset = get_cluster_offset(bs, sector_num << 9);
     index_in_cluster = sector_num & (s->cluster_sectors - 1);
     n = s->cluster_sectors - index_in_cluster;
     if (n > nb_sectors)
@@ -810,7 +867,7 @@
     uint64_t cluster_offset;
 
     while (nb_sectors > 0) {
-        cluster_offset = get_cluster_offset(bs, sector_num << 9, 0, 0, 0, 0);
+        cluster_offset = get_cluster_offset(bs, sector_num << 9);
         index_in_cluster = sector_num & (s->cluster_sectors - 1);
         n = s->cluster_sectors - index_in_cluster;
         if (n > nb_sectors)
@@ -859,9 +916,9 @@
         n = s->cluster_sectors - index_in_cluster;
         if (n > nb_sectors)
             n = nb_sectors;
-        cluster_offset = get_cluster_offset(bs, sector_num << 9, 1, 0,
-                                            index_in_cluster,
-                                            index_in_cluster + n);
+        cluster_offset = alloc_cluster_offset(bs, sector_num << 9, 0,
+                                              index_in_cluster,
+                                              index_in_cluster + n);
         if (!cluster_offset)
             return -1;
         if (s->crypt_method) {
@@ -934,8 +991,7 @@
     }
 
     /* prepare next AIO request */
-    acb->cluster_offset = get_cluster_offset(bs, acb->sector_num << 9,
-                                             0, 0, 0, 0);
+    acb->cluster_offset = get_cluster_offset(bs, acb->sector_num << 9);
     index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
     acb->n = s->cluster_sectors - index_in_cluster;
     if (acb->n > acb->nb_sectors)
@@ -1044,9 +1100,9 @@
     acb->n = s->cluster_sectors - index_in_cluster;
     if (acb->n > acb->nb_sectors)
         acb->n = acb->nb_sectors;
-    cluster_offset = get_cluster_offset(bs, acb->sector_num << 9, 1, 0,
-                                        index_in_cluster,
-                                        index_in_cluster + acb->n);
+    cluster_offset = alloc_cluster_offset(bs, acb->sector_num << 9, 0,
+                                          index_in_cluster,
+                                          index_in_cluster + acb->n);
     if (!cluster_offset || (cluster_offset & 511) != 0) {
         ret = -EIO;
         goto fail;
@@ -1306,8 +1362,8 @@
         /* could not compress: write normal cluster */
         qcow_write(bs, sector_num, buf, s->cluster_sectors);
     } else {
-        cluster_offset = get_cluster_offset(bs, sector_num << 9, 2,
-                                            out_len, 0, 0);
+        cluster_offset = alloc_cluster_offset(bs, sector_num << 9,
+                                              out_len, 0, 0);
         cluster_offset &= s->cluster_offset_mask;
        if (bdrv_pwrite(s->hd, cluster_offset, out_buf, out_len) != out_len) {
             qemu_free(out_buf);

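Note for readers of this revision: the read-only get_cluster_offset() and the new alloc_cluster_offset(), as well as their call sites, all share the same cluster-addressing arithmetic (l1_index, l2_index, index_in_cluster) and all strip the QCOW_OFLAG_COPIED bit out of L1/L2 entries before using them. The standalone sketch below is not part of the patch; it only re-derives those indices with example constants. The 64 KiB cluster size, the sample guest offset, and the sample L2 entry are assumptions chosen for illustration; in the driver the real values come from the image header via BDRVQcowState.

/* Illustration only -- not part of the patch.  The constants are assumed
 * example values; block-qcow2.c reads them from the image header. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define CLUSTER_BITS      16                        /* assumed 64 KiB clusters */
#define L2_BITS           (CLUSTER_BITS - 3)        /* an L2 table is one cluster of 8-byte entries */
#define L2_SIZE           (1 << L2_BITS)
#define CLUSTER_SECTORS   (1 << (CLUSTER_BITS - 9)) /* 512-byte sectors per cluster */
#define QCOW_OFLAG_COPIED (1ULL << 63)              /* flag bit carried in L1/L2 entries */

int main(void)
{
    uint64_t offset = 0x12345678ULL;        /* example guest offset, in bytes */
    uint64_t sector_num = offset >> 9;      /* the callers pass sector_num << 9 as the offset */

    /* index computations used by get_cluster_offset()/alloc_cluster_offset() */
    int l1_index = offset >> (L2_BITS + CLUSTER_BITS);
    int l2_index = (offset >> CLUSTER_BITS) & (L2_SIZE - 1);

    /* per-call-site arithmetic from the read and write paths in the patch */
    int index_in_cluster = sector_num & (CLUSTER_SECTORS - 1);
    int n = CLUSTER_SECTORS - index_in_cluster;     /* sectors left in this cluster */

    /* an L2 entry carries flag bits that both functions mask off before returning */
    uint64_t l2_entry = QCOW_OFLAG_COPIED | 0xa0000ULL;   /* made-up entry */
    uint64_t cluster_offset = l2_entry & ~QCOW_OFLAG_COPIED;

    printf("l1_index=%d l2_index=%d index_in_cluster=%d n=%d cluster_offset=0x%" PRIx64 "\n",
           l1_index, l2_index, index_in_cluster, n, cluster_offset);
    return 0;
}

Compiled with any C99 compiler, this prints l1_index=0 l2_index=4660 index_in_cluster=43 n=85 cluster_offset=0xa0000 for the example values above.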