root / target-sparc / helper.c @ 7a5e4488
History | View | Annotate | Download (30 kB)
1 |
/*
|
---|---|
2 |
* sparc helpers
|
3 |
*
|
4 |
* Copyright (c) 2003-2005 Fabrice Bellard
|
5 |
*
|
6 |
* This library is free software; you can redistribute it and/or
|
7 |
* modify it under the terms of the GNU Lesser General Public
|
8 |
* License as published by the Free Software Foundation; either
|
9 |
* version 2 of the License, or (at your option) any later version.
|
10 |
*
|
11 |
* This library is distributed in the hope that it will be useful,
|
12 |
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 |
* Lesser General Public License for more details.
|
15 |
*
|
16 |
* You should have received a copy of the GNU Lesser General Public
|
17 |
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
18 |
*/
|
19 |
|
20 |
#include "cpu.h" |
21 |
#include "host-utils.h" |
22 |
#include "helper.h" |
23 |
#include "sysemu.h" |
24 |
|
25 |
//#define DEBUG_MMU

#ifdef DEBUG_MMU
/* MMU trace output: enabled by defining DEBUG_MMU above, compiled to a
   no-op otherwise.  Used by both the SPARC32 SRMMU and SPARC64 I/DMMU
   models below. */
#define DPRINTF_MMU(fmt, ...) \
    do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF_MMU(fmt, ...) do {} while (0)
#endif
|
33 |
|
34 |
/* Sparc MMU emulation */
|
35 |
|
36 |
#if defined(CONFIG_USER_ONLY)
|
37 |
|
38 |
/* User-mode emulation has no MMU model: every access fault is reported
 * straight back to the caller.  rw bit 1 distinguishes an instruction
 * fetch (TT_TFAULT) from a data access (TT_DFAULT).  Always returns 1
 * (fault).
 */
int cpu_sparc_handle_mmu_fault(CPUState *env1, target_ulong address, int rw,
                               int mmu_idx)
{
    env1->exception_index = (rw & 2) ? TT_TFAULT : TT_DFAULT;
    return 1;
}
47 |
|
48 |
#else
|
49 |
|
50 |
#ifndef TARGET_SPARC64
|
51 |
/*
|
52 |
* Sparc V8 Reference MMU (SRMMU)
|
53 |
*/
|
54 |
/* SRMMU access-check table, indexed by [access type][PTE ACC field].
 * Zero means the access is permitted; a non-zero value is an error code
 * that get_physical_address() returns and that cpu_sparc_handle_mmu_fault()
 * ORs into the MMU fault status register (mmuregs[3]).
 * NOTE(review): 8 / 12 presumably encode the SRMMU fault-type field
 * (bits 4:2 of the FSR) -- confirm against the SPARC V8 manual. */
static const int access_table[8][8] = {
    { 0, 0, 0, 0, 8, 0, 12, 12 },
    { 0, 0, 0, 0, 8, 0, 0, 0 },
    { 8, 8, 0, 0, 0, 8, 12, 12 },
    { 8, 8, 0, 0, 0, 8, 0, 0 },
    { 8, 0, 8, 0, 8, 8, 12, 12 },
    { 8, 0, 8, 0, 8, 0, 8, 0 },
    { 8, 8, 8, 0, 8, 8, 12, 12 },
    { 8, 8, 8, 0, 8, 8, 8, 0 }
};
64 |
|
65 |
/* Page protection bits granted for each PTE ACC value.
 * Row 0 = supervisor access, row 1 = user access (indexed by is_user in
 * get_physical_address()).  ACC values 5..7 are supervisor-only, hence
 * the reduced/zero permissions in the user row. */
static const int perm_table[2][8] = {
    {
        PAGE_READ,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        PAGE_EXEC,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC
    },
    {
        PAGE_READ,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        PAGE_EXEC,
        PAGE_READ,
        0,
        0,
    }
};
87 |
|
88 |
/* Translate a virtual address through the SPARC V8 reference MMU.
 *
 * On success returns 0 and fills in *physical, *prot (PAGE_* bits),
 * *access_index and *page_size.  On failure returns a non-zero error
 * code: bits 9:8 encode the table level that faulted, bits 4:2 the
 * fault type (see access_table); the caller ORs this into the fault
 * status register.  rw: 0 = read, 1 = write, 2 = instruction fetch.
 */
static int get_physical_address(CPUState *env, target_phys_addr_t *physical,
                                int *prot, int *access_index,
                                target_ulong address, int rw, int mmu_idx,
                                target_ulong *page_size)
{
    int access_perms = 0;
    target_phys_addr_t pde_ptr;
    uint32_t pde;
    int error_code = 0, is_dirty, is_user;
    unsigned long page_offset;

    is_user = mmu_idx == MMU_USER_IDX;

    if ((env->mmuregs[0] & MMU_E) == 0) { /* MMU disabled */
        *page_size = TARGET_PAGE_SIZE;
        // Boot mode: instruction fetches are taken from PROM
        if (rw == 2 && (env->mmuregs[0] & env->def->mmu_bm)) {
            *physical = env->prom_addr | (address & 0x7ffffULL);
            *prot = PAGE_READ | PAGE_EXEC;
            return 0;
        }
        /* MMU off and not boot-mode fetch: identity mapping. */
        *physical = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return 0;
    }

    *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user? 0 : 1);
    *physical = 0xffffffffffff0000ULL;

    /* SPARC reference MMU table walk: Context table->L1->L2->PTE */
    /* Context base + context number */
    pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
    pde = ldl_phys(pde_ptr);

    /* Ctx pde */
    switch (pde & PTE_ENTRYTYPE_MASK) {
    default:
    case 0: /* Invalid */
        return 1 << 2;
    case 2: /* L0 PTE, maybe should not happen? */
    case 3: /* Reserved */
        return 4 << 2;
    case 1: /* L0 PDE */
        /* VA bits 31:24 index the level-1 table. */
        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
        pde = ldl_phys(pde_ptr);

        switch (pde & PTE_ENTRYTYPE_MASK) {
        default:
        case 0: /* Invalid */
            return (1 << 8) | (1 << 2);
        case 3: /* Reserved */
            return (1 << 8) | (4 << 2);
        case 1: /* L1 PDE */
            /* VA bits 23:18 index the level-2 table. */
            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
            pde = ldl_phys(pde_ptr);

            switch (pde & PTE_ENTRYTYPE_MASK) {
            default:
            case 0: /* Invalid */
                return (2 << 8) | (1 << 2);
            case 3: /* Reserved */
                return (2 << 8) | (4 << 2);
            case 1: /* L2 PDE */
                /* VA bits 17:12 index the level-3 table. */
                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
                pde = ldl_phys(pde_ptr);

                switch (pde & PTE_ENTRYTYPE_MASK) {
                default:
                case 0: /* Invalid */
                    return (3 << 8) | (1 << 2);
                case 1: /* PDE, should not happen */
                case 3: /* Reserved */
                    return (3 << 8) | (4 << 2);
                case 2: /* L3 PTE */
                    /* NOTE(review): (addr & PAGE_MASK) & (PAGE_SIZE - 1)
                       is always 0 -- looks like it should be the in-page
                       offset; harmless here since TLB fills are
                       page-granular, but confirm upstream. */
                    page_offset = (address & TARGET_PAGE_MASK) &
                        (TARGET_PAGE_SIZE - 1);
                }
                *page_size = TARGET_PAGE_SIZE;
                break;
            case 2: /* L2 PTE */
                /* 256 KB large page. */
                page_offset = address & 0x3ffff;
                *page_size = 0x40000;
            }
            break;
        case 2: /* L1 PTE */
            /* 16 MB large page. */
            page_offset = address & 0xffffff;
            *page_size = 0x1000000;
        }
    }

    /* check access */
    access_perms = (pde & PTE_ACCESS_MASK) >> PTE_ACCESS_SHIFT;
    error_code = access_table[*access_index][access_perms];
    if (error_code && !((env->mmuregs[0] & MMU_NF) && is_user))
        return error_code;

    /* update page modified and dirty bits */
    is_dirty = (rw & 1) && !(pde & PG_MODIFIED_MASK);
    if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
        pde |= PG_ACCESSED_MASK;
        if (is_dirty)
            pde |= PG_MODIFIED_MASK;
        /* notdirty write: avoid retranslating TBs over the page table. */
        stl_phys_notdirty(pde_ptr, pde);
    }

    /* the page can be put in the TLB */
    *prot = perm_table[is_user][access_perms];
    if (!(pde & PG_MODIFIED_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        *prot &= ~PAGE_WRITE;
    }

    /* Even if large ptes, we map only one 4KB page in the cache to
       avoid filling it too fast */
    *physical = ((target_phys_addr_t)(pde & PTE_ADDR_MASK) << 4) + page_offset;
    return error_code;
}
206 |
|
207 |
/* Perform address translation */
|
208 |
/* SPARC32 MMU fault handler: translate 'address' and install the mapping
 * in the softmmu TLB.  Returns 0 on success.  On a translation error the
 * fault status (mmuregs[3]) and fault address (mmuregs[4]) registers are
 * updated; returns 1 and sets exception_index unless the MMU is in
 * no-fault mode or traps are disabled (psret == 0), in which case a
 * permissive dummy mapping is installed instead.
 */
int cpu_sparc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
                                int mmu_idx)
{
    target_phys_addr_t paddr;
    target_ulong vaddr;
    target_ulong page_size;
    int error_code = 0, prot, access_index;

    error_code = get_physical_address(env, &paddr, &prot, &access_index,
                                      address, rw, mmu_idx, &page_size);
    if (error_code == 0) {
        vaddr = address & TARGET_PAGE_MASK;
        paddr &= TARGET_PAGE_MASK;
#ifdef DEBUG_MMU
        printf("Translate at " TARGET_FMT_lx " -> " TARGET_FMT_plx ", vaddr "
               TARGET_FMT_lx "\n", address, paddr, vaddr);
#endif
        tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
        return 0;
    }

    /* A still-pending (unread) fault status means this one overflows. */
    if (env->mmuregs[3]) /* Fault status register */
        env->mmuregs[3] = 1; /* overflow (not read before another fault) */
    env->mmuregs[3] |= (access_index << 5) | error_code | 2;
    env->mmuregs[4] = address; /* Fault address register */

    if ((env->mmuregs[0] & MMU_NF) || env->psret == 0)  {
        // No fault mode: if a mapping is available, just override
        // permissions. If no mapping is available, redirect accesses to
        // neverland. Fake/overridden mappings will be flushed when
        // switching to normal mode.
        vaddr = address & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        tlb_set_page(env, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE);
        return 0;
    } else {
        if (rw & 2)
            env->exception_index = TT_TFAULT;
        else
            env->exception_index = TT_DFAULT;
        return 1;
    }
}
251 |
|
252 |
/* Probe the SRMMU page tables for 'address' and return the raw PDE/PTE
 * found at the requested table level without faulting:
 *   mmulev == 3: context-table entry, 2: level-1, 1: level-2, 0: walk to
 *   the level-3 PTE.  Returns 0 if the walk hits an invalid or reserved
 * entry before reaching the requested level.
 */
target_ulong mmu_probe(CPUState *env, target_ulong address, int mmulev)
{
    target_phys_addr_t pde_ptr;
    uint32_t pde;

    /* Context base + context number */
    pde_ptr = (target_phys_addr_t)(env->mmuregs[1] << 4) +
        (env->mmuregs[2] << 2);
    pde = ldl_phys(pde_ptr);

    switch (pde & PTE_ENTRYTYPE_MASK) {
    default:
    case 0: /* Invalid */
    case 2: /* PTE, maybe should not happen? */
    case 3: /* Reserved */
        return 0;
    case 1: /* L1 PDE */
        if (mmulev == 3)
            return pde;
        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
        pde = ldl_phys(pde_ptr);

        switch (pde & PTE_ENTRYTYPE_MASK) {
        default:
        case 0: /* Invalid */
        case 3: /* Reserved */
            return 0;
        case 2: /* L1 PTE */
            return pde;
        case 1: /* L2 PDE */
            if (mmulev == 2)
                return pde;
            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
            pde = ldl_phys(pde_ptr);

            switch (pde & PTE_ENTRYTYPE_MASK) {
            default:
            case 0: /* Invalid */
            case 3: /* Reserved */
                return 0;
            case 2: /* L2 PTE */
                return pde;
            case 1: /* L3 PDE */
                if (mmulev == 1)
                    return pde;
                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
                pde = ldl_phys(pde_ptr);

                switch (pde & PTE_ENTRYTYPE_MASK) {
                default:
                case 0: /* Invalid */
                case 1: /* PDE, should not happen */
                case 3: /* Reserved */
                    return 0;
                case 2: /* L3 PTE */
                    return pde;
                }
            }
        }
    }
    return 0;
}
314 |
|
315 |
/* Dump the whole SRMMU mapping for the current context: walk all 256
 * level-1 entries (16 MB each), then 64 level-2 (256 KB) and 64 level-3
 * (4 KB) entries under each valid parent, printing VA -> PA and the raw
 * PDE/PTE for every present mapping.
 */
void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUState *env)
{
    target_ulong va, va1, va2;
    unsigned int n, m, o;
    target_phys_addr_t pde_ptr, pa;
    uint32_t pde;

    pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
    pde = ldl_phys(pde_ptr);
    (*cpu_fprintf)(f, "Root ptr: " TARGET_FMT_plx ", ctx: %d\n",
                   (target_phys_addr_t)env->mmuregs[1] << 4, env->mmuregs[2]);
    for (n = 0, va = 0; n < 256; n++, va += 16 * 1024 * 1024) {
        pde = mmu_probe(env, va, 2);
        if (pde) {
            pa = cpu_get_phys_page_debug(env, va);
            (*cpu_fprintf)(f, "VA: " TARGET_FMT_lx ", PA: " TARGET_FMT_plx
                           " PDE: " TARGET_FMT_lx "\n", va, pa, pde);
            for (m = 0, va1 = va; m < 64; m++, va1 += 256 * 1024) {
                pde = mmu_probe(env, va1, 1);
                if (pde) {
                    pa = cpu_get_phys_page_debug(env, va1);
                    (*cpu_fprintf)(f, " VA: " TARGET_FMT_lx ", PA: "
                                   TARGET_FMT_plx " PDE: " TARGET_FMT_lx "\n",
                                   va1, pa, pde);
                    for (o = 0, va2 = va1; o < 64; o++, va2 += 4 * 1024) {
                        pde = mmu_probe(env, va2, 0);
                        if (pde) {
                            pa = cpu_get_phys_page_debug(env, va2);
                            (*cpu_fprintf)(f, "  VA: " TARGET_FMT_lx ", PA: "
                                           TARGET_FMT_plx " PTE: "
                                           TARGET_FMT_lx "\n",
                                           va2, pa, pde);
                        }
                    }
                }
            }
        }
    }
}
354 |
|
355 |
/* Gdb expects all registers windows to be flushed in ram. This function handles
 * reads (and only reads) in stack frames as if windows were flushed. We assume
 * that the sparc ABI is followed.
 *
 * Returns 0 on success, -1 on memory access failure.  Writes fall
 * straight through to cpu_memory_rw_debug().  For reads, any byte of the
 * requested range that lies inside a not-yet-flushed register save area
 * (the 64 bytes at %fp of each live window) is served from the register
 * file instead of guest memory.
 */
int target_memory_rw_debug(CPUState *env, target_ulong addr,
                           uint8_t *buf, int len, int is_write)
{
    int i;
    int len1;
    int cwp = env->cwp;  /* walk windows outward from the current one */

    if (!is_write) {
        for (i = 0; i < env->nwindows; i++) {
            int off;
            /* %fp of this window: regbase[cwp*16 + 22] is %i6. */
            target_ulong fp = env->regbase[cwp * 16 + 22];

            /* Assume fp == 0 means end of frame.  */
            if (fp == 0) {
                break;
            }

            cwp = cpu_cwp_inc(env, cwp + 1);

            /* Invalid window ? */
            if (env->wim & (1 << cwp)) {
                break;
            }

            /* According to the ABI, the stack is growing downward. */
            if (addr + len < fp) {
                break;
            }

            /* Not in this frame. */
            if (addr > fp + 64) {
                continue;
            }

            /* Handle access before this window.  */
            if (addr < fp) {
                len1 = fp - addr;
                if (cpu_memory_rw_debug(env, addr, buf, len1, is_write) != 0) {
                    return -1;
                }
                addr += len1;
                len -= len1;
                buf += len1;
            }

            /* Access byte per byte to registers. Not very efficient but speed
             * is not critical.
             */
            off = addr - fp;
            len1 = 64 - off;

            if (len1 > len) {
                len1 = len;
            }

            for (; len1; len1--) {
                /* %l0 of window 'cwp' is regbase[cwp*16 + 8]; the save
                   area holds %l0-%l7 then %i0-%i7, big-endian. */
                int reg = cwp * 16 + 8 + (off >> 2);
                union {
                    uint32_t v;
                    uint8_t c[4];
                } u;
                u.v = cpu_to_be32(env->regbase[reg]);
                *buf++ = u.c[off & 3];
                addr++;
                len--;
                off++;
            }

            if (len == 0) {
                return 0;
            }
        }
    }
    /* Remainder (or any write) goes through normal debug memory access. */
    return cpu_memory_rw_debug(env, addr, buf, len, is_write);
}
434 |
|
435 |
#else /* !TARGET_SPARC64 */ |
436 |
|
437 |
// 41 bit physical address space
|
438 |
static inline target_phys_addr_t ultrasparc_truncate_physical(uint64_t x) |
439 |
{ |
440 |
return x & 0x1ffffffffffULL; |
441 |
} |
442 |
|
443 |
/*
|
444 |
* UltraSparc IIi I/DMMUs
|
445 |
*/
|
446 |
|
447 |
// Returns true if TTE tag is valid and matches virtual address value in context
// requires virtual address mask value calculated from TTE entry size
static inline int ultrasparc_tag_match(SparcTLBEntry *tlb,
                                       uint64_t address, uint64_t context,
                                       target_phys_addr_t *physical)
{
    uint64_t mask;

    /* Derive the VA/PA comparison mask from the TTE page-size field. */
    switch (TTE_PGSIZE(tlb->tte)) {
    default:
    case 0x0: // 8k
        mask = 0xffffffffffffe000ULL;
        break;
    case 0x1: // 64k
        mask = 0xffffffffffff0000ULL;
        break;
    case 0x2: // 512k
        mask = 0xfffffffffff80000ULL;
        break;
    case 0x3: // 4M
        mask = 0xffffffffffc00000ULL;
        break;
    }

    // valid, context match, virtual address match?
    if (TTE_IS_VALID(tlb->tte) &&
        (TTE_IS_GLOBAL(tlb->tte) || tlb_compare_context(tlb, context))
        && compare_masked(address, tlb->tag, mask))
    {
        // decode physical address
        /* Page frame from the TTE plus in-page offset from the VA,
           truncated to the 41-bit PA space. */
        *physical = ((tlb->tte & mask) | (address & ~mask)) & 0x1ffffffe000ULL;
        return 1;
    }

    return 0;
}
483 |
|
484 |
/* UltraSPARC DMMU lookup for a data access.
 *
 * rw: 0 = read, 1 = write, 4 = no-fault load.  On a TLB hit with
 * permitted access, fills *physical and *prot and returns 0.  On a
 * privilege/side-effect/NFO violation, updates SFSR/SFAR/tag_access,
 * sets exception_index (TT_DFAULT or TT_DPROT) and returns 1.  On a
 * miss, sets TT_DMISS, updates only tag_access and returns 1.
 */
static int get_physical_address_data(CPUState *env,
                                     target_phys_addr_t *physical, int *prot,
                                     target_ulong address, int rw, int mmu_idx)
{
    unsigned int i;
    uint64_t context;
    uint64_t sfsr = 0;  /* fault-status bits accumulated during the walk */

    int is_user = (mmu_idx == MMU_USER_IDX ||
                   mmu_idx == MMU_USER_SECONDARY_IDX);

    if ((env->lsu & DMMU_E) == 0) { /* DMMU disabled */
        *physical = ultrasparc_truncate_physical(address);
        *prot = PAGE_READ | PAGE_WRITE;
        return 0;
    }

    /* Select the translation context from the softmmu index. */
    switch(mmu_idx) {
    case MMU_USER_IDX:
    case MMU_KERNEL_IDX:
        context = env->dmmu.mmu_primary_context & 0x1fff;
        sfsr |= SFSR_CT_PRIMARY;
        break;
    case MMU_USER_SECONDARY_IDX:
    case MMU_KERNEL_SECONDARY_IDX:
        context = env->dmmu.mmu_secondary_context & 0x1fff;
        sfsr |= SFSR_CT_SECONDARY;
        break;
    case MMU_NUCLEUS_IDX:
        sfsr |= SFSR_CT_NUCLEUS;
        /* FALLTHRU */
    default:
        context = 0;
        break;
    }

    if (rw == 1) {
        sfsr |= SFSR_WRITE_BIT;
    } else if (rw == 4) {
        sfsr |= SFSR_NF_BIT;
    }

    for (i = 0; i < 64; i++) {
        // ctx match, vaddr match, valid?
        if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) {
            int do_fault = 0;

            // access ok?
            /* multiple bits in SFSR.FT may be set on TT_DFAULT */
            if (TTE_IS_PRIV(env->dtlb[i].tte) && is_user) {
                do_fault = 1;
                sfsr |= SFSR_FT_PRIV_BIT; /* privilege violation */

                DPRINTF_MMU("DFAULT at %" PRIx64 " context %" PRIx64
                            " mmu_idx=%d tl=%d\n",
                            address, context, mmu_idx, env->tl);
            }
            if (rw == 4) {
                /* No-fault access to a side-effect page is an error. */
                if (TTE_IS_SIDEEFFECT(env->dtlb[i].tte)) {
                    do_fault = 1;
                    sfsr |= SFSR_FT_NF_E_BIT;
                }
            } else {
                /* Normal access to a no-fault-only page is an error. */
                if (TTE_IS_NFO(env->dtlb[i].tte)) {
                    do_fault = 1;
                    sfsr |= SFSR_FT_NFO_BIT;
                }
            }

            if (do_fault) {
                /* faults above are reported with TT_DFAULT. */
                env->exception_index = TT_DFAULT;
            } else if (!TTE_IS_W_OK(env->dtlb[i].tte) && (rw == 1)) {
                do_fault = 1;
                env->exception_index = TT_DPROT;

                DPRINTF_MMU("DPROT at %" PRIx64 " context %" PRIx64
                            " mmu_idx=%d tl=%d\n",
                            address, context, mmu_idx, env->tl);
            }

            if (!do_fault) {
                *prot = PAGE_READ;
                if (TTE_IS_W_OK(env->dtlb[i].tte)) {
                    *prot |= PAGE_WRITE;
                }

                TTE_SET_USED(env->dtlb[i].tte);

                return 0;
            }

            if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */
                sfsr |= SFSR_OW_BIT; /* overflow (not read before
                                        another fault) */
            }

            if (env->pstate & PS_PRIV) {
                sfsr |= SFSR_PR_BIT;
            }

            /* FIXME: ASI field in SFSR must be set */
            env->dmmu.sfsr = sfsr | SFSR_VALID_BIT;

            env->dmmu.sfar = address; /* Fault address register */

            env->dmmu.tag_access = (address & ~0x1fffULL) | context;

            return 1;
        }
    }

    DPRINTF_MMU("DMISS at %" PRIx64 " context %" PRIx64 "\n",
                address, context);

    /*
     * On MMU misses:
     * - UltraSPARC IIi: SFSR and SFAR unmodified
     * - JPS1: SFAR updated and some fields of SFSR updated
     */
    env->dmmu.tag_access = (address & ~0x1fffULL) | context;
    env->exception_index = TT_DMISS;
    return 1;
}
608 |
|
609 |
/* UltraSPARC IMMU lookup for an instruction fetch.
 *
 * On a TLB hit with permitted access returns 0 with *prot = PAGE_EXEC.
 * A privileged page fetched from user mode updates SFSR/tag_access,
 * sets TT_TFAULT and returns 1.  A miss sets TT_TMISS and returns 1.
 */
static int get_physical_address_code(CPUState *env,
                                     target_phys_addr_t *physical, int *prot,
                                     target_ulong address, int mmu_idx)
{
    unsigned int i;
    uint64_t context;

    int is_user = (mmu_idx == MMU_USER_IDX ||
                   mmu_idx == MMU_USER_SECONDARY_IDX);

    if ((env->lsu & IMMU_E) == 0 || (env->pstate & PS_RED) != 0) {
        /* IMMU disabled */
        *physical = ultrasparc_truncate_physical(address);
        *prot = PAGE_EXEC;
        return 0;
    }

    if (env->tl == 0) {
        /* PRIMARY context */
        context = env->dmmu.mmu_primary_context & 0x1fff;
    } else {
        /* NUCLEUS context */
        context = 0;
    }

    for (i = 0; i < 64; i++) {
        // ctx match, vaddr match, valid?
        if (ultrasparc_tag_match(&env->itlb[i],
                                 address, context, physical)) {
            // access ok?
            if (TTE_IS_PRIV(env->itlb[i].tte) && is_user) {
                /* Fault status register */
                if (env->immu.sfsr & SFSR_VALID_BIT) {
                    env->immu.sfsr = SFSR_OW_BIT; /* overflow (not read before
                                                     another fault) */
                } else {
                    env->immu.sfsr = 0;
                }
                if (env->pstate & PS_PRIV) {
                    env->immu.sfsr |= SFSR_PR_BIT;
                }
                if (env->tl > 0) {
                    env->immu.sfsr |= SFSR_CT_NUCLEUS;
                }

                /* FIXME: ASI field in SFSR must be set */
                env->immu.sfsr |= SFSR_FT_PRIV_BIT | SFSR_VALID_BIT;
                env->exception_index = TT_TFAULT;

                env->immu.tag_access = (address & ~0x1fffULL) | context;

                DPRINTF_MMU("TFAULT at %" PRIx64 " context %" PRIx64 "\n",
                            address, context);

                return 1;
            }
            *prot = PAGE_EXEC;
            TTE_SET_USED(env->itlb[i].tte);
            return 0;
        }
    }

    DPRINTF_MMU("TMISS at %" PRIx64 " context %" PRIx64 "\n",
                address, context);

    /* Context is stored in DMMU (dmmuregs[1]) also for IMMU */
    env->immu.tag_access = (address & ~0x1fffULL) | context;
    env->exception_index = TT_TMISS;
    return 1;
}
679 |
|
680 |
static int get_physical_address(CPUState *env, target_phys_addr_t *physical, |
681 |
int *prot, int *access_index, |
682 |
target_ulong address, int rw, int mmu_idx, |
683 |
target_ulong *page_size) |
684 |
{ |
685 |
/* ??? We treat everything as a small page, then explicitly flush
|
686 |
everything when an entry is evicted. */
|
687 |
*page_size = TARGET_PAGE_SIZE; |
688 |
|
689 |
#if defined (DEBUG_MMU)
|
690 |
/* safety net to catch wrong softmmu index use from dynamic code */
|
691 |
if (env->tl > 0 && mmu_idx != MMU_NUCLEUS_IDX) { |
692 |
DPRINTF_MMU("get_physical_address %s tl=%d mmu_idx=%d"
|
693 |
" primary context=%" PRIx64
|
694 |
" secondary context=%" PRIx64
|
695 |
" address=%" PRIx64
|
696 |
"\n",
|
697 |
(rw == 2 ? "CODE" : "DATA"), |
698 |
env->tl, mmu_idx, |
699 |
env->dmmu.mmu_primary_context, |
700 |
env->dmmu.mmu_secondary_context, |
701 |
address); |
702 |
} |
703 |
#endif
|
704 |
|
705 |
if (rw == 2) |
706 |
return get_physical_address_code(env, physical, prot, address,
|
707 |
mmu_idx); |
708 |
else
|
709 |
return get_physical_address_data(env, physical, prot, address, rw,
|
710 |
mmu_idx); |
711 |
} |
712 |
|
713 |
/* Perform address translation */
|
714 |
/* SPARC64 MMU fault handler: translate 'address' and install the
 * mapping in the softmmu TLB.  Returns 0 on success; on failure the
 * exception state has already been set by get_physical_address_*() and
 * 1 is returned so the caller raises the trap.
 */
int cpu_sparc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
                                int mmu_idx)
{
    target_ulong virt_addr, vaddr;
    target_phys_addr_t paddr;
    target_ulong page_size;
    int error_code = 0, prot, access_index;

    error_code = get_physical_address(env, &paddr, &prot, &access_index,
                                      address, rw, mmu_idx, &page_size);
    if (error_code == 0) {
        virt_addr = address & TARGET_PAGE_MASK;
        /* NOTE(review): the second addend is always 0 (a page-masked
           address has no low bits); vaddr == virt_addr here. */
        vaddr = virt_addr + ((address & TARGET_PAGE_MASK) &
                             (TARGET_PAGE_SIZE - 1));

        DPRINTF_MMU("Translate at %" PRIx64 " -> %" PRIx64 ","
                    " vaddr %" PRIx64
                    " mmu_idx=%d"
                    " tl=%d"
                    " primary context=%" PRIx64
                    " secondary context=%" PRIx64
                    "\n",
                    address, paddr, vaddr, mmu_idx, env->tl,
                    env->dmmu.mmu_primary_context,
                    env->dmmu.mmu_secondary_context);

        tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
        return 0;
    }
    // XXX
    return 1;
}
746 |
|
747 |
/* Dump the SPARC64 MMU state: both context registers, then every valid
 * entry of the 64-entry DTLB and ITLB with page size, protection,
 * lock/global flags and context number.
 */
void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUState *env)
{
    unsigned int i;
    const char *mask;  /* human-readable page size of the current entry */

    (*cpu_fprintf)(f, "MMU contexts: Primary: %" PRId64 ", Secondary: %"
                   PRId64 "\n",
                   env->dmmu.mmu_primary_context,
                   env->dmmu.mmu_secondary_context);
    if ((env->lsu & DMMU_E) == 0) {
        (*cpu_fprintf)(f, "DMMU disabled\n");
    } else {
        (*cpu_fprintf)(f, "DMMU dump\n");
        for (i = 0; i < 64; i++) {
            switch (TTE_PGSIZE(env->dtlb[i].tte)) {
            default:
            case 0x0:
                mask = "  8k";
                break;
            case 0x1:
                mask = " 64k";
                break;
            case 0x2:
                mask = "512k";
                break;
            case 0x3:
                mask = "  4M";
                break;
            }
            if (TTE_IS_VALID(env->dtlb[i].tte)) {
                (*cpu_fprintf)(f, "[%02u] VA: %" PRIx64 ", PA: %llx"
                               ", %s, %s, %s, %s, ctx %" PRId64 " %s\n",
                               i,
                               env->dtlb[i].tag & (uint64_t)~0x1fffULL,
                               TTE_PA(env->dtlb[i].tte),
                               mask,
                               TTE_IS_PRIV(env->dtlb[i].tte) ? "priv" : "user",
                               TTE_IS_W_OK(env->dtlb[i].tte) ? "RW" : "RO",
                               TTE_IS_LOCKED(env->dtlb[i].tte) ?
                               "locked" : "unlocked",
                               env->dtlb[i].tag & (uint64_t)0x1fffULL,
                               TTE_IS_GLOBAL(env->dtlb[i].tte)?
                               "global" : "local");
            }
        }
    }
    if ((env->lsu & IMMU_E) == 0) {
        (*cpu_fprintf)(f, "IMMU disabled\n");
    } else {
        (*cpu_fprintf)(f, "IMMU dump\n");
        for (i = 0; i < 64; i++) {
            switch (TTE_PGSIZE(env->itlb[i].tte)) {
            default:
            case 0x0:
                mask = "  8k";
                break;
            case 0x1:
                mask = " 64k";
                break;
            case 0x2:
                mask = "512k";
                break;
            case 0x3:
                mask = "  4M";
                break;
            }
            if (TTE_IS_VALID(env->itlb[i].tte)) {
                (*cpu_fprintf)(f, "[%02u] VA: %" PRIx64 ", PA: %llx"
                               ", %s, %s, %s, ctx %" PRId64 " %s\n",
                               i,
                               env->itlb[i].tag & (uint64_t)~0x1fffULL,
                               TTE_PA(env->itlb[i].tte),
                               mask,
                               TTE_IS_PRIV(env->itlb[i].tte) ? "priv" : "user",
                               TTE_IS_LOCKED(env->itlb[i].tte) ?
                               "locked" : "unlocked",
                               env->itlb[i].tag & (uint64_t)0x1fffULL,
                               TTE_IS_GLOBAL(env->itlb[i].tte)?
                               "global" : "local");
            }
        }
    }
}
830 |
|
831 |
#endif /* TARGET_SPARC64 */ |
832 |
|
833 |
static int cpu_sparc_get_phys_page(CPUState *env, target_phys_addr_t *phys, |
834 |
target_ulong addr, int rw, int mmu_idx) |
835 |
{ |
836 |
target_ulong page_size; |
837 |
int prot, access_index;
|
838 |
|
839 |
return get_physical_address(env, phys, &prot, &access_index, addr, rw,
|
840 |
mmu_idx, &page_size); |
841 |
} |
842 |
|
843 |
#if defined(TARGET_SPARC64)
|
844 |
/* Translate addr as a no-fault access (rw == 4).  Returns the physical
 * address, or -1 if no translation exists; never raises a guest fault.
 */
target_phys_addr_t cpu_get_phys_page_nofault(CPUState *env, target_ulong addr,
                                             int mmu_idx)
{
    target_phys_addr_t pa;
    int failed = cpu_sparc_get_phys_page(env, &pa, addr, 4, mmu_idx);

    return failed != 0 ? (target_phys_addr_t)-1 : pa;
}
854 |
#endif
|
855 |
|
856 |
/* Debugger address translation: try an instruction-fetch lookup first,
 * fall back to a data-read lookup, and reject pages that resolve to
 * unassigned I/O memory.  Returns -1 when no usable mapping exists.
 */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_phys_addr_t pa;
    int mmu_idx = cpu_mmu_index(env);

    if (cpu_sparc_get_phys_page(env, &pa, addr, 2, mmu_idx) != 0 &&
        cpu_sparc_get_phys_page(env, &pa, addr, 0, mmu_idx) != 0) {
        return -1;
    }
    if (cpu_get_physical_page_desc(pa) == IO_MEM_UNASSIGNED) {
        return -1;
    }
    return pa;
}
871 |
#endif
|
872 |
|
873 |
/* misc op helpers */
|
874 |
/* Raise guest trap 'tt' immediately; longjmps back to the CPU loop via
 * cpu_loop_exit() and therefore never returns to the caller. */
void helper_raise_exception(CPUState *env, int tt)
{
    env->exception_index = tt;
    cpu_loop_exit(env);
}
879 |
|
880 |
/* Stop execution with EXCP_DEBUG (breakpoint/watchpoint hit); does not
 * return -- cpu_loop_exit() longjmps to the CPU loop. */
void helper_debug(CPUState *env)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit(env);
}
885 |
|
886 |
/* Request a system shutdown; a no-op under user-mode emulation where
 * there is no machine to power off. */
void helper_shutdown(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_system_shutdown_request();
#endif
}
892 |
|
893 |
#ifdef TARGET_SPARC64
|
894 |
/* SPARC64 POPC instruction: population count (number of set bits) of
 * the 64-bit operand. */
target_ulong helper_popc(target_ulong val)
{
    return ctpop64(val);
}
898 |
|
899 |
/* Write the %tick counter of the timer identified by 'opaque'; no-op in
 * user-mode emulation (no tick timer device exists there). */
void helper_tick_set_count(void *opaque, uint64_t count)
{
#if !defined(CONFIG_USER_ONLY)
    cpu_tick_set_count(opaque, count);
#endif
}
905 |
|
906 |
/* Read the %tick counter of the timer identified by 'opaque'; returns 0
 * in user-mode emulation where no tick timer device exists. */
uint64_t helper_tick_get_count(void *opaque)
{
#if !defined(CONFIG_USER_ONLY)
    return cpu_tick_get_count(opaque);
#else
    return 0;
#endif
}
914 |
|
915 |
/* Set the compare/limit value of the tick timer identified by 'opaque';
 * no-op in user-mode emulation. */
void helper_tick_set_limit(void *opaque, uint64_t limit)
{
#if !defined(CONFIG_USER_ONLY)
    cpu_tick_set_limit(opaque, limit);
#endif
}
921 |
#endif
|
922 |
|
923 |
/* UDIV/UDIVcc: unsigned divide of the 64-bit dividend Y:a by the 32-bit
 * divisor b.  The quotient saturates to 0xffffffff on overflow; when cc
 * is non-zero the integer condition-code state is updated (CC_OP_DIV,
 * with cc_src2 holding the overflow flag).  Division by zero raises
 * TT_DIV_ZERO and does not return.
 */
static target_ulong helper_udiv_common(CPUState *env, target_ulong a,
                                       target_ulong b, int cc)
{
    int overflow = 0;
    uint64_t x0;
    uint32_t x1;

    /* Build the dividend with an unsigned shift: the previous
       (int64_t) cast invoked undefined behavior (signed shift
       overflow) whenever Y had bit 31 set.  The bit pattern is
       identical on conforming implementations. */
    x0 = (a & 0xffffffff) | ((uint64_t)(env->y) << 32);
    x1 = (b & 0xffffffff);

    if (x1 == 0) {
        /* Does not return: longjmps to the CPU loop. */
        helper_raise_exception(env, TT_DIV_ZERO);
    }

    x0 = x0 / x1;
    if (x0 > 0xffffffff) {
        /* Quotient does not fit in 32 bits: saturate and flag overflow. */
        x0 = 0xffffffff;
        overflow = 1;
    }

    if (cc) {
        env->cc_dst = x0;
        env->cc_src2 = overflow;
        env->cc_op = CC_OP_DIV;
    }
    return x0;
}
950 |
|
951 |
/* UDIV: unsigned divide without condition-code update. */
target_ulong helper_udiv(CPUState *env, target_ulong a, target_ulong b)
{
    return helper_udiv_common(env, a, b, 0);
}
955 |
|
956 |
/* UDIVcc: unsigned divide that also updates the condition codes. */
target_ulong helper_udiv_cc(CPUState *env, target_ulong a, target_ulong b)
{
    return helper_udiv_common(env, a, b, 1);
}
960 |
|
961 |
/* SDIV/SDIVcc: signed divide of the 64-bit dividend Y:a by the 32-bit
 * divisor b.  The quotient saturates to INT32_MIN/INT32_MAX on
 * overflow; when cc is non-zero the integer condition-code state is
 * updated (CC_OP_DIV, with cc_src2 holding the overflow flag).
 * Division by zero raises TT_DIV_ZERO and does not return.
 */
static target_ulong helper_sdiv_common(CPUState *env, target_ulong a,
                                       target_ulong b, int cc)
{
    int overflow = 0;
    int64_t x0;
    int32_t x1;

    /* Assemble the dividend with an unsigned shift to avoid undefined
       behavior (signed shift overflow) when Y has bit 31 set; the
       reinterpreting cast back to int64_t preserves the bit pattern. */
    x0 = (int64_t)((a & 0xffffffff) | ((uint64_t)(env->y) << 32));
    x1 = (b & 0xffffffff);

    if (x1 == 0) {
        /* Does not return: longjmps to the CPU loop. */
        helper_raise_exception(env, TT_DIV_ZERO);
    } else if (x1 == -1 && x0 == INT64_MIN) {
        /* INT64_MIN / -1 is undefined behavior in C; the true quotient
           (+2^63) overflows a positive 32-bit result, so saturate the
           same way the generic overflow path below would. */
        x0 = 0x7fffffff;
        overflow = 1;
    } else {
        x0 = x0 / x1;
        if ((int32_t) x0 != x0) {
            /* Quotient does not fit in 32 bits: saturate toward the
               correct sign and flag overflow. */
            x0 = x0 < 0 ? 0x80000000 : 0x7fffffff;
            overflow = 1;
        }
    }

    if (cc) {
        env->cc_dst = x0;
        env->cc_src2 = overflow;
        env->cc_op = CC_OP_DIV;
    }
    return x0;
}
988 |
|
989 |
/* SDIV: signed divide without condition-code update. */
target_ulong helper_sdiv(CPUState *env, target_ulong a, target_ulong b)
{
    return helper_sdiv_common(env, a, b, 0);
}
993 |
|
994 |
/* SDIVcc: signed divide that also updates the condition codes. */
target_ulong helper_sdiv_cc(CPUState *env, target_ulong a, target_ulong b)
{
    return helper_sdiv_common(env, a, b, 1);
}