root / target-sparc / helper.c @ 49be8030
History | View | Annotate | Download (11.8 kB)
1 |
/*
|
---|---|
2 |
* sparc helpers
|
3 |
*
|
4 |
* Copyright (c) 2003 Fabrice Bellard
|
5 |
*
|
6 |
* This library is free software; you can redistribute it and/or
|
7 |
* modify it under the terms of the GNU Lesser General Public
|
8 |
* License as published by the Free Software Foundation; either
|
9 |
* version 2 of the License, or (at your option) any later version.
|
10 |
*
|
11 |
* This library is distributed in the hope that it will be useful,
|
12 |
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 |
* Lesser General Public License for more details.
|
15 |
*
|
16 |
* You should have received a copy of the GNU Lesser General Public
|
17 |
* License along with this library; if not, write to the Free Software
|
18 |
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
19 |
*/
|
20 |
#include "exec.h" |
21 |
|
22 |
//#define DEBUG_PCALL
|
23 |
//#define DEBUG_MMU
|
24 |
|
25 |
/* Sparc MMU emulation */
|
26 |
int cpu_sparc_handle_mmu_fault (CPUState *env, uint32_t address, int rw, |
27 |
int is_user, int is_softmmu); |
28 |
|
29 |
/* thread support */
|
30 |
|
31 |
/* Single global lock used to serialize CPU emulation between host threads. */
spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

/* Acquire the global CPU lock (spins until available). */
void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

/* Release the global CPU lock.  Must pair with a prior cpu_lock(). */
void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
42 |
|
43 |
#if !defined(CONFIG_USER_ONLY)
|
44 |
|
45 |
#define MMUSUFFIX _mmu
|
46 |
#define GETPC() (__builtin_return_address(0)) |
47 |
|
48 |
#define SHIFT 0 |
49 |
#include "softmmu_template.h" |
50 |
|
51 |
#define SHIFT 1 |
52 |
#include "softmmu_template.h" |
53 |
|
54 |
#define SHIFT 2 |
55 |
#include "softmmu_template.h" |
56 |
|
57 |
#define SHIFT 3 |
58 |
#include "softmmu_template.h" |
59 |
|
60 |
|
61 |
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int is_user, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUState *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    /* Walk the page tables; on success this also installs the TLB entry
       (is_softmmu = 1). */
    ret = cpu_sparc_handle_mmu_fault(env, addr, is_write, is_user, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* NOTE(review): raise_exception_err() presumably longjmps back to the
           CPU loop and never returns, so saved_env is NOT restored on the
           fault path -- confirm this is intentional. */
        raise_exception_err(ret, env->error_code);
    }
    env = saved_env;
}
93 |
#endif
|
94 |
|
95 |
/* SPARC reference MMU access check table, indexed as
   access_table[access_index][access_perms].  access_index is built in
   get_physical_address() as ((rw & 1) << 2) | (rw & 2) | (is_user ? 0 : 1),
   i.e. write/fetch/supervisor bits; access_perms is the PTE ACC field.
   0 means the access is allowed; a non-zero value is the error code
   returned to the caller (NOTE(review): presumably matches the SRMMU
   fault-type encoding -- confirm against the SPARC V8 manual). */
static const int access_table[8][8] = {
    { 0, 0, 0, 0, 2, 0, 3, 3 },
    { 0, 0, 0, 0, 2, 0, 0, 0 },
    { 2, 2, 0, 0, 0, 2, 3, 3 },
    { 2, 2, 0, 0, 0, 2, 0, 0 },
    { 2, 0, 2, 0, 2, 2, 3, 3 },
    { 2, 0, 2, 0, 2, 0, 2, 0 },
    { 2, 2, 2, 0, 2, 2, 3, 3 },
    { 2, 2, 2, 0, 2, 2, 2, 0 }
};

/* 1 = write OK.  Indexed as rw_table[is_user][access_perms]; used to decide
   whether PAGE_WRITE may be granted for an already-dirty page. */
static const int rw_table[2][8] = {
    { 0, 1, 0, 1, 0, 1, 0, 1 },
    { 0, 1, 0, 1, 0, 0, 0, 0 }
};
111 |
|
112 |
/* Translate a virtual address via the SPARC reference MMU page tables.
 *
 * On success returns 0 and fills in:
 *   *physical     - physical address for `address`
 *   *prot         - PAGE_READ, plus PAGE_WRITE when already dirty and writable
 *   *access_index - index into access_table built from rw/is_user
 * On failure returns a non-zero error code (1 = invalid entry, 4 = reserved
 * or unexpected entry type, or an access_table[] protection code).
 * rw encodes the access: bit 0 = write, bit 1 presumably = instruction
 * fetch (it feeds access_index bit 1) -- confirm against callers.
 */
int get_physical_address (CPUState *env, uint32_t *physical, int *prot,
                          int *access_index, uint32_t address, int rw,
                          int is_user)
{
    int access_perms = 0;
    target_phys_addr_t pde_ptr;
    uint32_t pde, virt_addr;
    int error_code = 0, is_dirty;
    unsigned long page_offset;

    virt_addr = address & TARGET_PAGE_MASK;
    if ((env->mmuregs[0] & MMU_E) == 0) { /* MMU disabled */
        /* Identity mapping, fully accessible. */
        *physical = address;
        *prot = PAGE_READ | PAGE_WRITE;
        return 0;
    }

    /* SPARC reference MMU table walk: Context table->L1->L2->PTE */
    /* Context base + context number */
    pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 4);
    pde = ldl_phys(pde_ptr);

    /* Ctx pde */
    switch (pde & PTE_ENTRYTYPE_MASK) {
    default:
    case 0: /* Invalid */
        return 1;
    case 2: /* L0 PTE, maybe should not happen? */
    case 3: /* Reserved */
        return 4;
    case 1: /* L0 PDE */
        /* Index the L1 table with address bits 31..24 (va >> 22 gives a
           word index; & ~3 aligns it). */
        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
        pde = ldl_phys(pde_ptr);

        switch (pde & PTE_ENTRYTYPE_MASK) {
        default:
        case 0: /* Invalid */
            return 1;
        case 3: /* Reserved */
            return 4;
        case 1: /* L1 PDE */
            /* Index the L2 table with address bits 23..18. */
            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
            pde = ldl_phys(pde_ptr);

            switch (pde & PTE_ENTRYTYPE_MASK) {
            default:
            case 0: /* Invalid */
                return 1;
            case 3: /* Reserved */
                return 4;
            case 1: /* L2 PDE */
                /* Index the L3 table with address bits 17..12. */
                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
                pde = ldl_phys(pde_ptr);

                switch (pde & PTE_ENTRYTYPE_MASK) {
                default:
                case 0: /* Invalid */
                    return 1;
                case 1: /* PDE, should not happen */
                case 3: /* Reserved */
                    return 4;
                case 2: /* L3 PTE */
                    virt_addr = address & TARGET_PAGE_MASK;
                    /* NOTE(review): this expression is always 0 (the page
                       mask and the low-bit mask are disjoint); the 4 KB
                       page maps at its aligned base -- confirm intent. */
                    page_offset = (address & TARGET_PAGE_MASK) & (TARGET_PAGE_SIZE - 1);
                }
                break;
            case 2: /* L2 PTE */
                /* 256 KB "large" page: keep the low 18 bits as offset. */
                virt_addr = address & ~0x3ffff;
                page_offset = address & 0x3ffff;
            }
            break;
        case 2: /* L1 PTE */
            /* 16 MB "huge" page: keep the low 24 bits as offset. */
            virt_addr = address & ~0xffffff;
            page_offset = address & 0xffffff;
        }
    }

    /* update page modified and dirty bits */
    is_dirty = (rw & 1) && !(pde & PG_MODIFIED_MASK);
    if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
        pde |= PG_ACCESSED_MASK;
        if (is_dirty)
            pde |= PG_MODIFIED_MASK;
        /* Write the PTE back without marking the page dirty for TB
           invalidation purposes. */
        stl_phys_notdirty(pde_ptr, pde);
    }
    /* check access */
    *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user? 0 : 1);
    access_perms = (pde & PTE_ACCESS_MASK) >> PTE_ACCESS_SHIFT;
    error_code = access_table[*access_index][access_perms];
    if (error_code)
        return error_code;

    /* the page can be put in the TLB */
    *prot = PAGE_READ;
    if (pde & PG_MODIFIED_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (rw_table[is_user][access_perms])
            *prot |= PAGE_WRITE;
    }

    /* Even if large ptes, we map only one 4KB page in the cache to
       avoid filling it too fast */
    *physical = ((pde & PTE_ADDR_MASK) << 4) + page_offset;
    return 0;
}
218 |
|
219 |
/* Perform address translation */
/* Handle an MMU fault: translate `address`, install a TLB entry on success,
 * or record the fault in the MMU fault status/address registers and raise
 * the error.  Returns 0 on success (or when the fault is suppressed by
 * no-fault mode), a non-zero error code otherwise. */
int cpu_sparc_handle_mmu_fault (CPUState *env, uint32_t address, int rw,
                                int is_user, int is_softmmu)
{
    int exception = 0;
    uint32_t virt_addr, paddr;
    unsigned long vaddr;
    int error_code = 0, prot, ret = 0, access_index;

    if (env->user_mode_only) {
        /* user mode only emulation */
        error_code = -2;
        goto do_fault_user;
    }

    error_code = get_physical_address(env, &paddr, &prot, &access_index, address, rw, is_user);
    if (error_code == 0) {
        virt_addr = address & TARGET_PAGE_MASK;
        /* NOTE(review): the added term is always 0 (disjoint masks), so
           vaddr == virt_addr -- confirm intent. */
        vaddr = virt_addr + ((address & TARGET_PAGE_MASK) & (TARGET_PAGE_SIZE - 1));
        ret = tlb_set_page(env, vaddr, paddr, prot, is_user, is_softmmu);
        return ret;
    }

    /* Record the fault in MMU registers: mmuregs[3] is the fault status
       register, mmuregs[4] the fault address register. */
    if (env->mmuregs[3]) /* Fault status register */
        env->mmuregs[3] = 1; /* overflow (not read before another fault) */
    env->mmuregs[3] |= (access_index << 5) | (error_code << 2) | 2;
    env->mmuregs[4] = address; /* Fault address register */

    if (env->mmuregs[0] & MMU_NF || env->psret == 0) // No fault
        return 0;
do_fault_user:
    /* NOTE(review): `exception` is never changed from 0, so
       exception_index is always cleared here -- the actual trap number is
       presumably chosen by the caller; confirm. */
    env->exception_index = exception;
    env->error_code = error_code;
    return error_code;
}
254 |
|
255 |
/* Copy a block of eight 32-bit words from src to dst (one half of a
   SPARC register window).  The regions must not overlap. */
void memcpy32(uint32_t *dst, const uint32_t *src)
{
    int i;

    for (i = 0; i < 8; i++)
        dst[i] = src[i];
}
266 |
|
267 |
/* Switch the current register window pointer (CWP) of the global `env`
 * to new_cwp.  The windows live contiguously in env->regbase; the top
 * window (NWINDOWS - 1) wraps, so its 8 overlapping registers are copied
 * to/from a scratch slot at env->regbase + NWINDOWS * 16 when leaving or
 * entering that window (presumably to keep the wrap-around registers
 * coherent with window 0 -- confirm against the register file layout). */
void set_cwp(int new_cwp)
{
    /* put the modified wrap registers at their proper location */
    if (env->cwp == (NWINDOWS - 1))
        memcpy32(env->regbase, env->regbase + NWINDOWS * 16);
    env->cwp = new_cwp;
    /* put the wrap registers at their temporary location */
    if (new_cwp == (NWINDOWS - 1))
        memcpy32(env->regbase + NWINDOWS * 16, env->regbase);
    /* Point the window register pointer at the new window's base. */
    env->regwptr = env->regbase + (new_cwp * 16);
}
278 |
|
279 |
/* External entry point for set_cwp(): temporarily installs env1 as the
 * global `env` (which set_cwp operates on), switches the window, then
 * restores the previous global env. */
void cpu_set_cwp(CPUState *env1, int new_cwp)
{
    CPUState *saved_env;
    saved_env = env;
    env = env1;
    set_cwp(new_cwp);
    env = saved_env;
}
287 |
|
288 |
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 *
 * NOTE(review): the comment above is inherited from the x86 version;
 * on this SPARC path next_eip, is_hw and error_code are unused (except
 * in debug logging) and is_int is only logged.  The function performs
 * SPARC trap entry: disable traps, rotate into a new register window,
 * save pc/npc into local registers %l1/%l2, enter supervisor mode, and
 * vector through the trap base register.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  unsigned int next_eip, int is_hw)
{
    int cwp;

#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_INT) {
        static int count;
        fprintf(logfile, "%6d: v=%02x e=%04x i=%d pc=%08x npc=%08x SP=%08x\n",
                count, intno, error_code, is_int,
                env->pc,
                env->npc, env->regwptr[6]);
#if 1
        cpu_dump_state(env, logfile, fprintf, 0);
        {
            int i;
            uint8_t *ptr;

            fprintf(logfile, " code=");
            ptr = (uint8_t *)env->pc;
            for(i = 0; i < 16; i++) {
                fprintf(logfile, " %02x", ldub(ptr + i));
            }
            fprintf(logfile, "\n");
        }
#endif
        count++;
    }
#endif
#if !defined(CONFIG_USER_ONLY)
    /* A trap while traps are disabled puts the CPU in error state. */
    if (env->psret == 0) {
        cpu_abort(cpu_single_env, "Trap while interrupts disabled, Error state");
        return;
    }
#endif
    env->psret = 0;                       /* disable traps */
    cwp = (env->cwp - 1) & (NWINDOWS - 1);  /* rotate to the next window */
    set_cwp(cwp);
    env->regwptr[9] = env->pc - 4; // XXX?
    env->regwptr[10] = env->pc;
    env->psrps = env->psrs;               /* save previous supervisor bit */
    env->psrs = 1;                        /* enter supervisor mode */
    /* Vector through the trap base register: TBR base | (trap number << 4). */
    env->tbr = (env->tbr & TBR_BASE_MASK) | (intno << 4);
    env->pc = env->tbr;
    env->npc = env->pc + 4;
    env->exception_index = 0;
}
340 |
|
341 |
/* Raise a CPU exception.  error_code is currently ignored here (it is
   stored by the caller in env->error_code before the call). */
void raise_exception_err(int exception_index, int error_code)
{
    raise_exception(exception_index);
}
345 |
|
346 |
/* Probe the SPARC reference MMU page tables for `address` and return the
 * raw page table entry found, or 0 if the walk hits an invalid/reserved
 * entry.  mmulev selects how deep to walk before returning a PDE:
 *   3 -> return the context-table entry
 *   2 -> return the L1-level entry
 *   1 -> return the L2-level entry
 *   0 -> walk all the way and return the L3 PTE
 * Operates on the global `env`. */
uint32_t mmu_probe(uint32_t address, int mmulev)
{
    target_phys_addr_t pde_ptr;
    uint32_t pde;

    /* Context base + context number */
    pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 4);
    pde = ldl_phys(pde_ptr);

    switch (pde & PTE_ENTRYTYPE_MASK) {
    default:
    case 0: /* Invalid */
    case 2: /* PTE, maybe should not happen? */
    case 3: /* Reserved */
        return 0;
    case 1: /* L1 PDE */
        if (mmulev == 3)
            return pde;
        /* Index the next level with address bits 31..24. */
        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
        pde = ldl_phys(pde_ptr);

        switch (pde & PTE_ENTRYTYPE_MASK) {
        default:
        case 0: /* Invalid */
        case 3: /* Reserved */
            return 0;
        case 2: /* L1 PTE */
            return pde;
        case 1: /* L2 PDE */
            if (mmulev == 2)
                return pde;
            /* Index the next level with address bits 23..18. */
            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
            pde = ldl_phys(pde_ptr);

            switch (pde & PTE_ENTRYTYPE_MASK) {
            default:
            case 0: /* Invalid */
            case 3: /* Reserved */
                return 0;
            case 2: /* L2 PTE */
                return pde;
            case 1: /* L3 PDE */
                if (mmulev == 1)
                    return pde;
                /* Index the last level with address bits 17..12. */
                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
                pde = ldl_phys(pde_ptr);

                switch (pde & PTE_ENTRYTYPE_MASK) {
                default:
                case 0: /* Invalid */
                case 1: /* PDE, should not happen */
                case 3: /* Reserved */
                    return 0;
                case 2: /* L3 PTE */
                    return pde;
                }
            }
        }
    }
    return 0;
}
407 |
|
408 |
/* Debug helper: walk and print every mapped region of the current MMU
 * context at all three page sizes (16 MB / 256 KB / 4 KB), using
 * mmu_probe() for the raw table entries and cpu_get_phys_page_debug()
 * for the resolved physical address.  Compiled to a no-op unless
 * DEBUG_MMU is defined.  Operates on the global `env`. */
void dump_mmu(void)
{
#ifdef DEBUG_MMU
    uint32_t pa, va, va1, va2;
    int n, m, o;
    target_phys_addr_t pde_ptr;
    uint32_t pde;

    printf("MMU dump:\n");
    pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 4);
    pde = ldl_phys(pde_ptr);
    printf("Root ptr: 0x%08x, ctx: %d\n", env->mmuregs[1] << 4, env->mmuregs[2]);
    /* Level 1: 256 regions of 16 MB each. */
    for (n = 0, va = 0; n < 256; n++, va += 16 * 1024 * 1024) {
        /* NOTE: pde_ptr is reused here to hold mmu_probe()'s returned PDE
           value, not a table address. */
        pde_ptr = mmu_probe(va, 2);
        if (pde_ptr) {
            pa = cpu_get_phys_page_debug(env, va);
            printf("VA: 0x%08x, PA: 0x%08x PDE: 0x%08x\n", va, pa, pde_ptr);
            /* Level 2: 64 regions of 256 KB each. */
            for (m = 0, va1 = va; m < 64; m++, va1 += 256 * 1024) {
                pde_ptr = mmu_probe(va1, 1);
                if (pde_ptr) {
                    pa = cpu_get_phys_page_debug(env, va1);
                    printf(" VA: 0x%08x, PA: 0x%08x PDE: 0x%08x\n", va1, pa, pde_ptr);
                    /* Level 3: 64 pages of 4 KB each. */
                    for (o = 0, va2 = va1; o < 64; o++, va2 += 4 * 1024) {
                        pde_ptr = mmu_probe(va2, 0);
                        if (pde_ptr) {
                            pa = cpu_get_phys_page_debug(env, va2);
                            printf("  VA: 0x%08x, PA: 0x%08x PTE: 0x%08x\n", va2, pa, pde_ptr);
                        }
                    }
                }
            }
        }
    }
    printf("MMU dump ends\n");
#endif
}