Revision aa102231 exec.c

b/exec.c
191 191
/* Flat table of memory-region sections; the iotlb encodes an index into
   this array (see "iotlb = section - phys_sections" below). */
static MemoryRegionSection *phys_sections;
192 192
/* Number of entries in use / number of entries allocated. */
static unsigned phys_sections_nb, phys_sections_nb_alloc;
193 193
/* Indices into phys_sections for the well-known dummy sections,
   filled in by dummy_section() during (re)initialization below. */
static uint16_t phys_section_unassigned;
194
static uint16_t phys_section_notdirty;
195
static uint16_t phys_section_rom;
196
static uint16_t phys_section_watch;
194 197

  
195 198
struct PhysPageEntry {
196 199
    uint16_t is_leaf : 1;
......
2214 2217
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
2215 2218
            + section_addr(section, paddr);
2216 2219
        if (!section->readonly)
2217
            iotlb |= io_mem_notdirty.ram_addr;
2220
            iotlb |= phys_section_notdirty;
2218 2221
        else
2219
            iotlb |= io_mem_rom.ram_addr;
2222
            iotlb |= phys_section_rom;
2220 2223
    } else {
2221 2224
        /* IO handlers are currently passed a physical address.
2222 2225
           It would be nice to pass an offset from the base address
......
2224 2227
           and avoid full address decoding in every device.
2225 2228
           We can't use the high bits of pd for this because
2226 2229
           IO_MEM_ROMD uses these as a ram address.  */
2227
        iotlb = memory_region_get_ram_addr(section->mr) & ~TARGET_PAGE_MASK;
2230
        iotlb = section - phys_sections;
2228 2231
        iotlb += section_addr(section, paddr);
2229 2232
    }
2230 2233

  
......
2235 2238
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2236 2239
            /* Avoid trapping reads of pages with a write breakpoint. */
2237 2240
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2238
                iotlb = io_mem_watch.ram_addr + paddr;
2241
                iotlb = phys_section_watch + paddr;
2239 2242
                address |= TLB_MMIO;
2240 2243
                break;
2241 2244
            }
......
3559 3562
    return phys_section_add(&section);
3560 3563
}
3561 3564

  
3565
/*
 * section_to_ioaddr: recover a device I/O address from an iotlb entry.
 *
 * The iotlb stores a phys_sections index in the sub-page bits (it is set
 * from "section - phys_sections" plus the in-page offset).  Re-combine the
 * page-aligned part of @section_io_addr with the sub-page bits of the
 * section's MemoryRegion ram_addr to form the address handed to the
 * I/O dispatch code.
 */
target_phys_addr_t section_to_ioaddr(target_phys_addr_t section_io_addr)
{
    unsigned idx = section_io_addr & ~TARGET_PAGE_MASK;
    MemoryRegionSection *s = &phys_sections[idx];
    target_phys_addr_t page_part = section_io_addr & TARGET_PAGE_MASK;

    return page_part | (s->mr->ram_addr & ~TARGET_PAGE_MASK);
}
3573

  
3562 3574
static void io_mem_init(void)
3563 3575
{
3564 3576
    int i;
......
3586 3598
    phys_sections_clear();
3587 3599
    phys_map.ptr = PHYS_MAP_NODE_NIL;
3588 3600
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
3601
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
3602
    phys_section_rom = dummy_section(&io_mem_rom);
3603
    phys_section_watch = dummy_section(&io_mem_watch);
3589 3604
}
3590 3605

  
3591 3606
static void core_commit(MemoryListener *listener)

Also available in: Unified diff