/*
 *  Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu-timer.h"
#include "memory.h"

#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
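
/*
 * A minimal sketch of how this template is instantiated, assuming the
 * usual per-size includes (the exact including file and MMUSUFFIX value
 * depend on the build; this example is illustrative, not part of this
 * header):
 *
 *     #define MMUSUFFIX _mmu
 *     #define SHIFT 2                // 1 << 2 = 4-byte accesses
 *     #include "softmmu_template.h"  // emits the "l" (uint32_t) helpers
 *
 * With SHIFT == 2, the dispatch above sets DATA_SIZE to 4, SUFFIX and
 * USUFFIX to "l", and DATA_TYPE to uint32_t.
 */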

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif

#ifndef CONFIG_TCG_PASS_AREG0
#define ENV_PARAM
#define ENV_VAR
#define CPU_PREFIX
#define HELPER_PREFIX __
#else
#define ENV_PARAM CPUArchState *env,
#define ENV_VAR env,
#define CPU_PREFIX cpu_
#define HELPER_PREFIX helper_
#endif
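
/*
 * The glue() pasting below builds each helper name from HELPER_PREFIX,
 * the operation, SUFFIX and MMUSUFFIX.  For example, with SHIFT == 2,
 * an assumed MMUSUFFIX of _mmu and CONFIG_TCG_PASS_AREG0 defined, the
 * load helper defined further down expands to roughly:
 *
 *     uint32_t helper_ldl_mmu(CPUArchState *env, target_ulong addr,
 *                             int mmu_idx);
 *
 * Without CONFIG_TCG_PASS_AREG0, the same helper is named __ldl_mmu and
 * takes no explicit env parameter (the global env is used instead).
 */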

static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_PARAM
                                                        target_ulong addr,
                                                        int mmu_idx,
                                                        uintptr_t retaddr);
static inline DATA_TYPE glue(io_read, SUFFIX)(ENV_PARAM
                                              target_phys_addr_t physaddr,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    DATA_TYPE res;
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = retaddr;
    if (mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_unassigned
        && mr != &io_mem_notdirty
        && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
#if SHIFT <= 2
    res = io_mem_read(mr, physaddr, 1 << SHIFT);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = io_mem_read(mr, physaddr, 4) << 32;
    res |= io_mem_read(mr, physaddr + 4, 4);
#else
    res = io_mem_read(mr, physaddr, 4);
    res |= io_mem_read(mr, physaddr + 4, 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return res;
}
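
/*
 * Note on the TLB hit test used by the helpers below: a tlb_table entry
 * keeps the page-aligned virtual address in its high bits and flag bits
 * in the low bits.  The comparison against
 * (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK)) therefore fails
 * both on a page mismatch and on an entry flagged invalid; any other
 * low bit (tlb_addr & ~TARGET_PAGE_MASK) diverts the access to the I/O
 * path.  On a miss, tlb_fill() refills the entry (or raises a guest
 * fault) and the lookup is retried via "goto redo".
 */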

/* handle all cases except unaligned accesses which span two pages */
DATA_TYPE
glue(glue(glue(HELPER_PREFIX, ld), SUFFIX), MMUSUFFIX)(ENV_PARAM
                                                       target_ulong addr,
                                                       int mmu_idx)
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t ioaddr;
    uintptr_t retaddr;

    /* test if there is a match for unaligned or IO access */
    /* XXX: could be done more in a memory macro, in a non-portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0) {
                goto do_unaligned_access;
            }
            retaddr = GETPC();
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(ENV_VAR ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(ENV_VAR addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_VAR addr,
                                                         mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access in the same page */
            uintptr_t addend;
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(ENV_VAR addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(intptr_t)
                                                (addr + addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(ENV_VAR addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}
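
/*
 * GETPC() captures the return address into the translated code that
 * called this helper.  It is threaded through as "retaddr" so that
 * tlb_fill() and cpu_io_recompile() can unwind back to the guest
 * instruction responsible for the access when a fault or recompilation
 * is needed.
 */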

/* handle all unaligned cases */
static DATA_TYPE
glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_PARAM
                                       target_ulong addr,
                                       int mmu_idx,
                                       uintptr_t retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    target_phys_addr_t ioaddr;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0) {
                goto do_unaligned_access;
            }
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(ENV_VAR ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_VAR addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_VAR addr2,
                                                          mmu_idx, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            uintptr_t addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(intptr_t)
                                                (addr + addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}
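
/*
 * Worked example of the recombination in slow_ld above (little-endian,
 * SHIFT == 2, 4 KiB pages; addresses illustrative): a 4-byte load at
 * 0x0ffe yields addr1 = 0x0ffc, addr2 = 0x1000 and shift = 16.
 * res1 >> 16 contributes the bytes at 0x0ffe..0x0fff as the low half
 * and res2 << 16 contributes the bytes at 0x1000..0x1001 as the high
 * half, reassembling exactly the four bytes starting at 0x0ffe.
 */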

#ifndef SOFTMMU_CODE_ACCESS

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(ENV_PARAM
                                                   target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   uintptr_t retaddr);

static inline void glue(io_write, SUFFIX)(ENV_PARAM
                                          target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_unassigned
        && mr != &io_mem_notdirty
        && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = retaddr;
#if SHIFT <= 2
    io_mem_write(mr, physaddr, val, 1 << SHIFT);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write(mr, physaddr, (val >> 32), 4);
    io_mem_write(mr, physaddr + 4, (uint32_t)val, 4);
#else
    io_mem_write(mr, physaddr, (uint32_t)val, 4);
    io_mem_write(mr, physaddr + 4, val >> 32, 4);
#endif
#endif /* SHIFT > 2 */
}

void glue(glue(glue(HELPER_PREFIX, st), SUFFIX), MMUSUFFIX)(ENV_PARAM
                                                            target_ulong addr,
                                                            DATA_TYPE val,
                                                            int mmu_idx)
{
    target_phys_addr_t ioaddr;
    target_ulong tlb_addr;
    uintptr_t retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0) {
                goto do_unaligned_access;
            }
            retaddr = GETPC();
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(ENV_VAR ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(ENV_VAR addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(ENV_VAR addr, val,
                                                   mmu_idx, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
            uintptr_t addend;
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(ENV_VAR addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(intptr_t)
                                         (addr + addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(ENV_VAR addr, 1, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}
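
/*
 * Besides real MMIO, stores also take the io_write path when the TLB
 * entry carries the not-dirty marker (io_mem_notdirty): this is how
 * writes into pages that contain translated code are detected so that
 * the stale translation blocks can be invalidated.
 */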

/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(ENV_PARAM
                                                   target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   uintptr_t retaddr)
{
    target_phys_addr_t ioaddr;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0) {
                goto do_unaligned_access;
            }
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(ENV_VAR ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache.  */
            for (i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(ENV_VAR addr + i,
                                          val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(ENV_VAR addr + i,
                                          val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            uintptr_t addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(intptr_t)
                                         (addr + addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}
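
/*
 * Concretely, for the byte-by-byte fallback above with DATA_SIZE == 4
 * on a little-endian target: the loop stores val >> 24 at addr + 3,
 * val >> 16 at addr + 2, val >> 8 at addr + 1 and val at addr + 0, so
 * each byte independently takes the MMIO, cross-page or RAM path.
 */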

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef ENV_PARAM
#undef ENV_VAR
#undef CPU_PREFIX
#undef HELPER_PREFIX
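
/*
 * Caller-side sketch (illustrative, assuming CONFIG_TCG_PASS_AREG0 and
 * MMUSUFFIX == _mmu): TCG-generated code falls back to these helpers
 * when its inline TLB fast path misses, roughly as
 *
 *     uint32_t v = helper_ldl_mmu(env, addr, mmu_idx);
 *     helper_stl_mmu(env, addr, v, mmu_idx);
 *
 * where mmu_idx selects which of the CPU's TLBs (e.g. kernel vs. user
 * mode) is consulted.
 */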