Revision 6ebbf390 softmmu_header.h
b/softmmu_header.h | ||
---|---|---|
39 | 39 |
#error unsupported data size |
40 | 40 |
#endif |
41 | 41 |
|
42 |
#if ACCESS_TYPE == 0
|
|
42 |
#if ACCESS_TYPE < (NB_MMU_MODES)
|
|
43 | 43 |
|
44 |
#define CPU_MEM_INDEX 0
|
|
44 |
#define CPU_MMU_INDEX ACCESS_TYPE
|
|
45 | 45 |
#define MMUSUFFIX _mmu |
46 | 46 |
|
47 |
#elif ACCESS_TYPE == 1
|
|
47 |
#elif ACCESS_TYPE == (NB_MMU_MODES)
|
|
48 | 48 |
|
49 |
#define CPU_MEM_INDEX 1
|
|
49 |
#define CPU_MMU_INDEX (cpu_mmu_index(env))
|
|
50 | 50 |
#define MMUSUFFIX _mmu |
51 | 51 |
|
52 |
#elif ACCESS_TYPE == 2 |
|
53 |
|
|
54 |
#ifdef TARGET_I386 |
|
55 |
#define CPU_MEM_INDEX ((env->hflags & HF_CPL_MASK) == 3) |
|
56 |
#elif defined (TARGET_PPC) |
|
57 |
#define CPU_MEM_INDEX (msr_pr) |
|
58 |
#elif defined (TARGET_MIPS) |
|
59 |
#define CPU_MEM_INDEX ((env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM) |
|
60 |
#elif defined (TARGET_SPARC) |
|
61 |
#define CPU_MEM_INDEX ((env->psrs) == 0) |
|
62 |
#elif defined (TARGET_ARM) |
|
63 |
#define CPU_MEM_INDEX ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) |
|
64 |
#elif defined (TARGET_SH4) |
|
65 |
#define CPU_MEM_INDEX ((env->sr & SR_MD) == 0) |
|
66 |
#elif defined (TARGET_ALPHA) |
|
67 |
#define CPU_MEM_INDEX ((env->ps >> 3) & 3) |
|
68 |
#elif defined (TARGET_M68K) |
|
69 |
#define CPU_MEM_INDEX ((env->sr & SR_S) == 0) |
|
70 |
#elif defined (TARGET_CRIS) |
|
71 |
/* CRIS FIXME: I guess we want to validate supervisor mode accesses here. */ |
|
72 |
#define CPU_MEM_INDEX (0) |
|
73 |
#else |
|
74 |
#error unsupported CPU |
|
75 |
#endif |
|
76 |
#define MMUSUFFIX _mmu |
|
52 |
#elif ACCESS_TYPE == (NB_MMU_MODES + 1) |
|
77 | 53 |
|
78 |
#elif ACCESS_TYPE == 3 |
|
79 |
|
|
80 |
#ifdef TARGET_I386 |
|
81 |
#define CPU_MEM_INDEX ((env->hflags & HF_CPL_MASK) == 3) |
|
82 |
#elif defined (TARGET_PPC) |
|
83 |
#define CPU_MEM_INDEX (msr_pr) |
|
84 |
#elif defined (TARGET_MIPS) |
|
85 |
#define CPU_MEM_INDEX ((env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM) |
|
86 |
#elif defined (TARGET_SPARC) |
|
87 |
#define CPU_MEM_INDEX ((env->psrs) == 0) |
|
88 |
#elif defined (TARGET_ARM) |
|
89 |
#define CPU_MEM_INDEX ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) |
|
90 |
#elif defined (TARGET_SH4) |
|
91 |
#define CPU_MEM_INDEX ((env->sr & SR_MD) == 0) |
|
92 |
#elif defined (TARGET_ALPHA) |
|
93 |
#define CPU_MEM_INDEX ((env->ps >> 3) & 3) |
|
94 |
#elif defined (TARGET_M68K) |
|
95 |
#define CPU_MEM_INDEX ((env->sr & SR_S) == 0) |
|
96 |
#elif defined (TARGET_CRIS) |
|
97 |
/* CRIS FIXME: I guess we want to validate supervisor mode accesses here. */ |
|
98 |
#define CPU_MEM_INDEX (0) |
|
99 |
#else |
|
100 |
#error unsupported CPU |
|
101 |
#endif |
|
54 |
#define CPU_MMU_INDEX (cpu_mmu_index(env)) |
|
102 | 55 |
#define MMUSUFFIX _cmmu |
103 | 56 |
|
104 | 57 |
#else |
... | ... | |
111 | 64 |
#define RES_TYPE int |
112 | 65 |
#endif |
113 | 66 |
|
114 |
#if ACCESS_TYPE == 3
|
|
67 |
#if ACCESS_TYPE == (NB_MMU_MODES + 1)
|
|
115 | 68 |
#define ADDR_READ addr_code |
116 | 69 |
#else |
117 | 70 |
#define ADDR_READ addr_read |
118 | 71 |
#endif |
119 | 72 |
|
120 | 73 |
DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr, |
121 |
int is_user);
|
|
122 |
void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr, DATA_TYPE v, int is_user);
|
|
74 |
int mmu_idx);
|
|
75 |
void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr, DATA_TYPE v, int mmu_idx);
|
|
123 | 76 |
|
124 | 77 |
#if (DATA_SIZE <= 4) && (TARGET_LONG_BITS == 32) && defined(__i386__) && \ |
125 |
(ACCESS_TYPE <= 1) && defined(ASM_SOFTMMU)
|
|
78 |
(ACCESS_TYPE < NB_MMU_MODES) && defined(ASM_SOFTMMU)
|
|
126 | 79 |
|
127 | 80 |
#define CPU_TLB_ENTRY_BITS 4 |
128 | 81 |
|
... | ... | |
161 | 114 |
"i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS), |
162 | 115 |
"i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS), |
163 | 116 |
"i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)), |
164 |
"m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MEM_INDEX][0].addr_read)),
|
|
165 |
"i" (CPU_MEM_INDEX),
|
|
117 |
"m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MMU_INDEX][0].addr_read)),
|
|
118 |
"i" (CPU_MMU_INDEX),
|
|
166 | 119 |
"m" (*(uint8_t *)&glue(glue(__ld, SUFFIX), MMUSUFFIX)) |
167 | 120 |
: "%eax", "%ecx", "%edx", "memory", "cc"); |
168 | 121 |
return res; |
... | ... | |
208 | 161 |
"i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS), |
209 | 162 |
"i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS), |
210 | 163 |
"i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)), |
211 |
"m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MEM_INDEX][0].addr_read)),
|
|
212 |
"i" (CPU_MEM_INDEX),
|
|
164 |
"m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MMU_INDEX][0].addr_read)),
|
|
165 |
"i" (CPU_MMU_INDEX),
|
|
213 | 166 |
"m" (*(uint8_t *)&glue(glue(__ld, SUFFIX), MMUSUFFIX)) |
214 | 167 |
: "%eax", "%ecx", "%edx", "memory", "cc"); |
215 | 168 |
return res; |
... | ... | |
260 | 213 |
"i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS), |
261 | 214 |
"i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS), |
262 | 215 |
"i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)), |
263 |
"m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MEM_INDEX][0].addr_write)),
|
|
264 |
"i" (CPU_MEM_INDEX),
|
|
216 |
"m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MMU_INDEX][0].addr_write)),
|
|
217 |
"i" (CPU_MMU_INDEX),
|
|
265 | 218 |
"m" (*(uint8_t *)&glue(glue(__st, SUFFIX), MMUSUFFIX)) |
266 | 219 |
: "%eax", "%ecx", "%edx", "memory", "cc"); |
267 | 220 |
} |
... | ... | |
276 | 229 |
RES_TYPE res; |
277 | 230 |
target_ulong addr; |
278 | 231 |
unsigned long physaddr; |
279 |
int is_user;
|
|
232 |
int mmu_idx;
|
|
280 | 233 |
|
281 | 234 |
addr = ptr; |
282 | 235 |
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); |
283 |
is_user = CPU_MEM_INDEX;
|
|
284 |
if (__builtin_expect(env->tlb_table[is_user][index].ADDR_READ !=
|
|
236 |
mmu_idx = CPU_MMU_INDEX;
|
|
237 |
if (__builtin_expect(env->tlb_table[mmu_idx][index].ADDR_READ !=
|
|
285 | 238 |
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) { |
286 |
res = glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, is_user);
|
|
239 |
res = glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, mmu_idx);
|
|
287 | 240 |
} else { |
288 |
physaddr = addr + env->tlb_table[is_user][index].addend;
|
|
241 |
physaddr = addr + env->tlb_table[mmu_idx][index].addend;
|
|
289 | 242 |
res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)physaddr); |
290 | 243 |
} |
291 | 244 |
return res; |
... | ... | |
297 | 250 |
int res, index; |
298 | 251 |
target_ulong addr; |
299 | 252 |
unsigned long physaddr; |
300 |
int is_user;
|
|
253 |
int mmu_idx;
|
|
301 | 254 |
|
302 | 255 |
addr = ptr; |
303 | 256 |
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); |
304 |
is_user = CPU_MEM_INDEX;
|
|
305 |
if (__builtin_expect(env->tlb_table[is_user][index].ADDR_READ !=
|
|
257 |
mmu_idx = CPU_MMU_INDEX;
|
|
258 |
if (__builtin_expect(env->tlb_table[mmu_idx][index].ADDR_READ !=
|
|
306 | 259 |
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) { |
307 |
res = (DATA_STYPE)glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, is_user);
|
|
260 |
res = (DATA_STYPE)glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, mmu_idx);
|
|
308 | 261 |
} else { |
309 |
physaddr = addr + env->tlb_table[is_user][index].addend;
|
|
262 |
physaddr = addr + env->tlb_table[mmu_idx][index].addend;
|
|
310 | 263 |
res = glue(glue(lds, SUFFIX), _raw)((uint8_t *)physaddr); |
311 | 264 |
} |
312 | 265 |
return res; |
313 | 266 |
} |
314 | 267 |
#endif |
315 | 268 |
|
316 |
#if ACCESS_TYPE != 3
|
|
269 |
#if ACCESS_TYPE != (NB_MMU_MODES + 1)
|
|
317 | 270 |
|
318 | 271 |
/* generic store macro */ |
319 | 272 |
|
... | ... | |
322 | 275 |
int index; |
323 | 276 |
target_ulong addr; |
324 | 277 |
unsigned long physaddr; |
325 |
int is_user;
|
|
278 |
int mmu_idx;
|
|
326 | 279 |
|
327 | 280 |
addr = ptr; |
328 | 281 |
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); |
329 |
is_user = CPU_MEM_INDEX;
|
|
330 |
if (__builtin_expect(env->tlb_table[is_user][index].addr_write !=
|
|
282 |
mmu_idx = CPU_MMU_INDEX;
|
|
283 |
if (__builtin_expect(env->tlb_table[mmu_idx][index].addr_write !=
|
|
331 | 284 |
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) { |
332 |
glue(glue(__st, SUFFIX), MMUSUFFIX)(addr, v, is_user);
|
|
285 |
glue(glue(__st, SUFFIX), MMUSUFFIX)(addr, v, mmu_idx);
|
|
333 | 286 |
} else { |
334 |
physaddr = addr + env->tlb_table[is_user][index].addend;
|
|
287 |
physaddr = addr + env->tlb_table[mmu_idx][index].addend;
|
|
335 | 288 |
glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, v); |
336 | 289 |
} |
337 | 290 |
} |
338 | 291 |
|
339 |
#endif /* ACCESS_TYPE != 3 */
|
|
292 |
#endif /* ACCESS_TYPE != (NB_MMU_MODES + 1) */
|
|
340 | 293 |
|
341 | 294 |
#endif /* !asm */ |
342 | 295 |
|
343 |
#if ACCESS_TYPE != 3
|
|
296 |
#if ACCESS_TYPE != (NB_MMU_MODES + 1)
|
|
344 | 297 |
|
345 | 298 |
#if DATA_SIZE == 8 |
346 | 299 |
static inline float64 glue(ldfq, MEMSUFFIX)(target_ulong ptr) |
... | ... | |
386 | 339 |
} |
387 | 340 |
#endif /* DATA_SIZE == 4 */ |
388 | 341 |
|
389 |
#endif /* ACCESS_TYPE != 3 */
|
|
342 |
#endif /* ACCESS_TYPE != (NB_MMU_MODES + 1) */
|
|
390 | 343 |
|
391 | 344 |
#undef RES_TYPE |
392 | 345 |
#undef DATA_TYPE |
... | ... | |
394 | 347 |
#undef SUFFIX |
395 | 348 |
#undef USUFFIX |
396 | 349 |
#undef DATA_SIZE |
397 |
#undef CPU_MEM_INDEX
|
|
350 |
#undef CPU_MMU_INDEX
|
|
398 | 351 |
#undef MMUSUFFIX |
399 | 352 |
#undef ADDR_READ |
Also available in: Unified diff