Revision 2999097b

--- a/exec.c
+++ b/exec.c
@@ -436,8 +436,9 @@
 }
 
 
-static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t index,
-                                uint16_t leaf, int level)
+static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
+                                target_phys_addr_t *nb, uint16_t leaf,
+                                int level)
 {
     PhysPageEntry *p;
     int i;
@@ -453,20 +454,27 @@
     } else {
         p = phys_map_nodes[lp->u.node];
     }
-    lp = &p[(index >> (level * L2_BITS)) & (L2_SIZE - 1)];
+    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
 
-    if (level == 0) {
-        lp->u.leaf = leaf;
-    } else {
-        phys_page_set_level(lp, index, leaf, level - 1);
+    while (*nb && lp < &p[L2_SIZE]) {
+        if (level == 0) {
+            lp->u.leaf = leaf;
+            ++*index;
+            --*nb;
+        } else {
+            phys_page_set_level(lp, index, nb, leaf, level - 1);
+        }
+        ++lp;
     }
 }
 
-static void phys_page_set(target_phys_addr_t index, uint16_t leaf)
+static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
+                          uint16_t leaf)
 {
-    phys_map_node_reserve(P_L2_LEVELS);
+    /* Wildly overreserve - it doesn't matter much. */
+    phys_map_node_reserve((nb + L2_SIZE - 1) / L2_SIZE * P_L2_LEVELS);
 
-    phys_page_set_level(&phys_map, index, leaf, P_L2_LEVELS - 1);
+    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
 }
 
 static MemoryRegionSection phys_page_find(target_phys_addr_t index)
@@ -2630,7 +2638,8 @@
     if (!(existing.mr->subpage)) {
         subpage = subpage_init(base);
         subsection.mr = &subpage->iomem;
-        phys_page_set(base >> TARGET_PAGE_BITS, phys_section_add(&subsection));
+        phys_page_set(base >> TARGET_PAGE_BITS, 1,
+                      phys_section_add(&subsection));
     } else {
         subpage = container_of(existing.mr, subpage_t, iomem);
     }
@@ -2644,18 +2653,14 @@
 {
     target_phys_addr_t start_addr = section->offset_within_address_space;
     ram_addr_t size = section->size;
-    target_phys_addr_t addr, end_addr;
+    target_phys_addr_t addr;
     uint16_t section_index = phys_section_add(section);
 
     assert(size);
 
-    end_addr = start_addr + (target_phys_addr_t)size;
-
     addr = start_addr;
-    do {
-        phys_page_set(addr >> TARGET_PAGE_BITS, section_index);
-        addr += TARGET_PAGE_SIZE;
-    } while (addr != end_addr);
+    phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
+                  section_index);
 }
 
 void cpu_register_physical_memory_log(MemoryRegionSection *section,
