Revision aa102231 — softmmu_template.h

diff --git a/softmmu_template.h b/softmmu_template.h
110 | 110 |
if ((addr & (DATA_SIZE - 1)) != 0) |
111 | 111 |
goto do_unaligned_access; |
112 | 112 |
retaddr = GETPC(); |
113 | -            ioaddr = env->iotlb[mmu_idx][index];
113 | +            ioaddr = section_to_ioaddr(env->iotlb[mmu_idx][index]);
114 | 114 |
res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr); |
115 | 115 |
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) { |
116 | 116 |
/* slow unaligned access (it spans two pages or IO) */ |
... | ... | |
164 | 164 |
/* IO access */ |
165 | 165 |
if ((addr & (DATA_SIZE - 1)) != 0) |
166 | 166 |
goto do_unaligned_access; |
167 | -            ioaddr = env->iotlb[mmu_idx][index];
167 | +            ioaddr = section_to_ioaddr(env->iotlb[mmu_idx][index]);
168 | 168 |
res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr); |
169 | 169 |
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) { |
170 | 170 |
do_unaligned_access: |
... | ... | |
251 | 251 |
if ((addr & (DATA_SIZE - 1)) != 0) |
252 | 252 |
goto do_unaligned_access; |
253 | 253 |
retaddr = GETPC(); |
254 | -            ioaddr = env->iotlb[mmu_idx][index];
254 | +            ioaddr = section_to_ioaddr(env->iotlb[mmu_idx][index]);
255 | 255 |
glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr); |
256 | 256 |
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) { |
257 | 257 |
do_unaligned_access: |
... | ... | |
303 | 303 |
/* IO access */ |
304 | 304 |
if ((addr & (DATA_SIZE - 1)) != 0) |
305 | 305 |
goto do_unaligned_access; |
306 | -            ioaddr = env->iotlb[mmu_idx][index];
306 | +            ioaddr = section_to_ioaddr(env->iotlb[mmu_idx][index]);
307 | 307 |
glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr); |
308 | 308 |
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) { |
309 | 309 |
do_unaligned_access: |
Also available in: Unified diff