Revision aa102231

b/exec-all.h
@@ -299,6 +299,7 @@
 
 #if !defined(CONFIG_USER_ONLY)
 
+target_phys_addr_t section_to_ioaddr(target_phys_addr_t section_io_addr);
 uint64_t io_mem_read(int index, target_phys_addr_t addr, unsigned size);
 void io_mem_write(int index, target_phys_addr_t addr, uint64_t value,
                   unsigned size);
b/exec.c
@@ -191,6 +191,9 @@
 static MemoryRegionSection *phys_sections;
 static unsigned phys_sections_nb, phys_sections_nb_alloc;
 static uint16_t phys_section_unassigned;
+static uint16_t phys_section_notdirty;
+static uint16_t phys_section_rom;
+static uint16_t phys_section_watch;
 
 struct PhysPageEntry {
     uint16_t is_leaf : 1;
@@ -2214,9 +2217,9 @@
         iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
             + section_addr(section, paddr);
         if (!section->readonly)
-            iotlb |= io_mem_notdirty.ram_addr;
+            iotlb |= phys_section_notdirty;
         else
-            iotlb |= io_mem_rom.ram_addr;
+            iotlb |= phys_section_rom;
     } else {
         /* IO handlers are currently passed a physical address.
            It would be nice to pass an offset from the base address
@@ -2224,7 +2227,7 @@
            and avoid full address decoding in every device.
            We can't use the high bits of pd for this because
            IO_MEM_ROMD uses these as a ram address.  */
-        iotlb = memory_region_get_ram_addr(section->mr) & ~TARGET_PAGE_MASK;
+        iotlb = section - phys_sections;
         iotlb += section_addr(section, paddr);
     }
 
@@ -2235,7 +2238,7 @@
         if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
             /* Avoid trapping reads of pages with a write breakpoint. */
             if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
-                iotlb = io_mem_watch.ram_addr + paddr;
+                iotlb = phys_section_watch + paddr;
                 address |= TLB_MMIO;
                 break;
             }
@@ -3559,6 +3562,15 @@
     return phys_section_add(&section);
 }
 
+target_phys_addr_t section_to_ioaddr(target_phys_addr_t section_io_addr)
+{
+    MemoryRegionSection *section;
+
+    section = &phys_sections[section_io_addr & ~TARGET_PAGE_MASK];
+    return (section_io_addr & TARGET_PAGE_MASK)
+        | (section->mr->ram_addr & ~TARGET_PAGE_MASK);
+}
+
 static void io_mem_init(void)
 {
     int i;
@@ -3586,6 +3598,9 @@
     phys_sections_clear();
     phys_map.ptr = PHYS_MAP_NODE_NIL;
     phys_section_unassigned = dummy_section(&io_mem_unassigned);
+    phys_section_notdirty = dummy_section(&io_mem_notdirty);
+    phys_section_rom = dummy_section(&io_mem_rom);
+    phys_section_watch = dummy_section(&io_mem_watch);
 }
 
 static void core_commit(MemoryListener *listener)
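With this revision the iotlb no longer caches a ram_addr-style I/O address for MMIO pages: tlb_set_page() stores the index of the MemoryRegionSection within phys_sections[] in the sub-page bits and the page-aligned offset within the section in the upper bits, and the new section_to_ioaddr() folds that back into the value io_mem_read()/io_mem_write() still expect. The sketch below is illustrative only and not part of the patch; the typedef and page constants are simplified stand-ins for the real QEMU definitions, and encode_iotlb()/decode_iotlb() are hypothetical names for what tlb_set_page() and section_to_ioaddr() do in the hunks above.

/* Illustrative sketch only, not part of this revision. */
#include <assert.h>
#include <stdint.h>

typedef uint64_t target_phys_addr_t;          /* assumption: 64-bit physical addresses */
#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_SIZE ((target_phys_addr_t)1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK (~(TARGET_PAGE_SIZE - 1))

/* What tlb_set_page() now stores for an MMIO page: the phys_sections[]
 * index in the sub-page bits, the page-aligned section offset above them.
 * The index therefore has to stay below TARGET_PAGE_SIZE. */
target_phys_addr_t encode_iotlb(uint16_t section_index,
                                target_phys_addr_t section_offset)
{
    assert(section_index < TARGET_PAGE_SIZE);
    return (section_offset & TARGET_PAGE_MASK) | section_index;
}

/* What section_to_ioaddr() recovers before io_mem_read()/io_mem_write():
 * the section index is swapped for the region's sub-page ram_addr bits,
 * which still carry the old-style I/O handler encoding. */
target_phys_addr_t decode_iotlb(target_phys_addr_t iotlb_value,
                                target_phys_addr_t region_ram_addr)
{
    return (iotlb_value & TARGET_PAGE_MASK)
        | (region_ram_addr & ~TARGET_PAGE_MASK);
}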
b/softmmu_template.h
@@ -110,7 +110,7 @@
             if ((addr & (DATA_SIZE - 1)) != 0)
                 goto do_unaligned_access;
             retaddr = GETPC();
-            ioaddr = env->iotlb[mmu_idx][index];
+            ioaddr = section_to_ioaddr(env->iotlb[mmu_idx][index]);
             res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
         } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
             /* slow unaligned access (it spans two pages or IO) */
@@ -164,7 +164,7 @@
             /* IO access */
             if ((addr & (DATA_SIZE - 1)) != 0)
                 goto do_unaligned_access;
-            ioaddr = env->iotlb[mmu_idx][index];
+            ioaddr = section_to_ioaddr(env->iotlb[mmu_idx][index]);
             res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
         } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
         do_unaligned_access:
@@ -251,7 +251,7 @@
             if ((addr & (DATA_SIZE - 1)) != 0)
                 goto do_unaligned_access;
             retaddr = GETPC();
-            ioaddr = env->iotlb[mmu_idx][index];
+            ioaddr = section_to_ioaddr(env->iotlb[mmu_idx][index]);
             glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
         } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
         do_unaligned_access:
@@ -303,7 +303,7 @@
             /* IO access */
             if ((addr & (DATA_SIZE - 1)) != 0)
                 goto do_unaligned_access;
-            ioaddr = env->iotlb[mmu_idx][index];
+            ioaddr = section_to_ioaddr(env->iotlb[mmu_idx][index]);
             glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
         } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
         do_unaligned_access:
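Follow-up note on the softmmu_template.h hunks above: all four MMIO branches (the regular and slow-path variants of the load and store helpers) now pass the cached iotlb value through section_to_ioaddr() before calling the io_read/io_write glue, which still expects the old ram_addr-based encoding. The decode is a single phys_sections[] lookup plus two masks and sits only on the I/O branch, so the RAM fast path is unchanged by this revision.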
