root / cpu-exec.c @ 07ce05ea
/*
 * i386 emulator main execution loop
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

#if defined(TARGET_ARM) || defined(TARGET_SPARC)
/* XXX: unify with i386 target */
void cpu_loop_exit(void)
{
    longjmp(env->jmp_env, 1);
}
#endif

/* main execution loop */
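/* cpu_exec() translates guest code on demand, one basic block at a time,
   into host code held in translation blocks (TBs). TBs are cached, chained
   directly to each other when possible, and executed until an exception or
   an interrupt request forces an exit from the loop. */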
|
int cpu_exec(CPUState *env1)
{
    int saved_T0, saved_T1, saved_T2;
    CPUState *saved_env;
#ifdef reg_EAX
    int saved_EAX;
#endif
#ifdef reg_ECX
    int saved_ECX;
#endif
#ifdef reg_EDX
    int saved_EDX;
#endif
#ifdef reg_EBX
    int saved_EBX;
#endif
#ifdef reg_ESP
    int saved_ESP;
#endif
#ifdef reg_EBP
    int saved_EBP;
#endif
#ifdef reg_ESI
    int saved_ESI;
#endif
#ifdef reg_EDI
    int saved_EDI;
#endif
#ifdef __sparc__
    int saved_i7, tmp_T0;
#endif
    int code_gen_size, ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb, **ptb;
    uint8_t *tc_ptr, *cs_base, *pc;
    unsigned int flags;

    /* first we save global registers */
    saved_T0 = T0;
    saved_T1 = T1;
    saved_T2 = T2;
    saved_env = env;
    env = env1;
#ifdef __sparc__
    /* we also save i7 because longjmp may not restore it */
    asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
#endif

#if defined(TARGET_I386)
#ifdef reg_EAX
    saved_EAX = EAX;
    EAX = env->regs[R_EAX];
#endif
#ifdef reg_ECX
    saved_ECX = ECX;
    ECX = env->regs[R_ECX];
#endif
#ifdef reg_EDX
    saved_EDX = EDX;
    EDX = env->regs[R_EDX];
#endif
#ifdef reg_EBX
    saved_EBX = EBX;
    EBX = env->regs[R_EBX];
#endif
#ifdef reg_ESP
    saved_ESP = ESP;
    ESP = env->regs[R_ESP];
#endif
#ifdef reg_EBP
    saved_EBP = EBP;
    EBP = env->regs[R_EBP];
#endif
#ifdef reg_ESI
    saved_ESI = ESI;
    ESI = env->regs[R_ESI];
#endif
#ifdef reg_EDI
    saved_EDI = EDI;
    EDI = env->regs[R_EDI];
#endif

    /* put eflags in CPU temporary format */
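    /* condition codes are evaluated lazily: CC_OP_EFLAGS means CC_SRC holds
       the flag bits directly, and DF is kept as +1/-1 so that the string
       instructions can simply add it to their index registers */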
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
    {
        unsigned int psr;
        psr = env->cpsr;
        env->CF = (psr >> 29) & 1;
        env->NZF = (psr & 0xc0000000) ^ 0x40000000;
        env->VF = (psr << 3) & 0x80000000;
        env->cpsr = psr & ~0xf0000000;
    }
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#endif
                }
                env->exception_index = -1;
            }
            T0 = 0; /* force lookup of first TB */
            for(;;) {
#ifdef __sparc__
                /* g1 can be modified by some libc? functions */
                tmp_T0 = T0;
#endif
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)) {
#if defined(TARGET_I386)
                    /* if hardware interrupt pending, we execute it */
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->eflags & IF_MASK) &&
                        !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        intno = cpu_x86_get_pic_interrupt(env);
                        if (loglevel) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#ifdef __sparc__
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)) {
                        do_queue_exception(EXCP_EXTERNAL);
                        if (check_exception_state(env))
                            do_interrupt(env);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if (loglevel) {
#if defined(TARGET_I386)
                    /* restore flags in standard format */
                    env->regs[R_EAX] = EAX;
                    env->regs[R_EBX] = EBX;
                    env->regs[R_ECX] = ECX;
                    env->regs[R_EDX] = EDX;
                    env->regs[R_ESI] = ESI;
                    env->regs[R_EDI] = EDI;
                    env->regs[R_EBP] = EBP;
                    env->regs[R_ESP] = ESP;
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    env->cpsr = compute_cpsr();
                    cpu_arm_dump_state(env, logfile, 0);
                    env->cpsr &= ~0xf0000000;
#elif defined(TARGET_SPARC)
                    cpu_sparc_dump_state(env, logfile, 0);
#elif defined(TARGET_PPC)
                    cpu_ppc_dump_state(env, logfile, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                /* we record a subset of the CPU state. It will
                   always be the same before a given translated block
                   is executed. */
#if defined(TARGET_I386)
                flags = env->hflags;
                flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                cs_base = env->segs[R_CS].base;
                pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
                flags = 0;
                cs_base = 0;
                pc = (uint8_t *)env->regs[15];
#elif defined(TARGET_SPARC)
                flags = 0;
                cs_base = (uint8_t *)env->npc;
                pc = (uint8_t *)env->pc;
#elif defined(TARGET_PPC)
                flags = 0;
                cs_base = 0;
                pc = (uint8_t *)env->nip;
#else
#error unsupported CPU
#endif
                tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
                             flags);
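                /* tb_find() probes the virtual pc hash table; on a miss we
                   fall back to the physical hash table below and, if that
                   fails too, translate a new block */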
                if (!tb) {
                    TranslationBlock **ptb1;
                    unsigned int h;
                    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

                    spin_lock(&tb_lock);

                    tb_invalidated_flag = 0;

                    /* find translated block using physical mappings */
                    phys_pc = get_phys_addr_code(env, (unsigned long)pc);
                    phys_page1 = phys_pc & TARGET_PAGE_MASK;
                    phys_page2 = -1;
                    h = tb_phys_hash_func(phys_pc);
                    ptb1 = &tb_phys_hash[h];
                    for(;;) {
                        tb = *ptb1;
                        if (!tb)
                            goto not_found;
                        if (tb->pc == (unsigned long)pc &&
                            tb->page_addr[0] == phys_page1 &&
                            tb->cs_base == (unsigned long)cs_base &&
                            tb->flags == flags) {
                            /* check next page if needed */
                            if (tb->page_addr[1] != -1) {
                                virt_page2 = ((unsigned long)pc & TARGET_PAGE_MASK) +
                                    TARGET_PAGE_SIZE;
                                phys_page2 = get_phys_addr_code(env, virt_page2);
                                if (tb->page_addr[1] == phys_page2)
                                    goto found;
                            } else {
                                goto found;
                            }
                        }
                        ptb1 = &tb->phys_hash_next;
                    }
                not_found:
                    /* if no translated code available, then translate it now */
                    tb = tb_alloc((unsigned long)pc);
                    if (!tb) {
                        /* flush must be done */
                        tb_flush(env);
                        /* cannot fail at this point */
                        tb = tb_alloc((unsigned long)pc);
                        /* don't forget to invalidate previous TB info */
                        ptb = &tb_hash[tb_hash_func((unsigned long)pc)];
                        T0 = 0;
                    }
                    tc_ptr = code_gen_ptr;
                    tb->tc_ptr = tc_ptr;
                    tb->cs_base = (unsigned long)cs_base;
                    tb->flags = flags;
                    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
                    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

                    /* check next page if needed */
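                    /* a block may span two pages; recording the second
                       physical page lets tb_link_phys() invalidate this TB
                       when either page is written to */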
                    virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
                    phys_page2 = -1;
                    if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
                        phys_page2 = get_phys_addr_code(env, virt_page2);
                    }
                    tb_link_phys(tb, phys_pc, phys_page2);

                found:
                    if (tb_invalidated_flag) {
                        /* as some TB could have been invalidated because
                           of memory exceptions while generating the code, we
                           must recompute the hash index here */
                        ptb = &tb_hash[tb_hash_func((unsigned long)pc)];
                        while (*ptb != NULL)
                            ptb = &(*ptb)->hash_next;
                        T0 = 0;
                    }
                    /* we add the TB in the virtual pc hash table */
                    *ptb = tb;
                    tb->hash_next = NULL;
                    tb_link(tb);
                    spin_unlock(&tb_lock);
                }
#ifdef DEBUG_EXEC
                if (loglevel) {
                    fprintf(logfile, "Trace 0x%08lx [0x%08lx] %s\n",
                            (long)tb->tc_ptr, (long)tb->pc,
                            lookup_symbol((void *)tb->pc));
                }
#endif
#ifdef __sparc__
                T0 = tmp_T0;
#endif
                /* see if we can patch the calling TB. */
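                /* the generated code for a TB exit leaves in T0 the address
                   of the TB it came from, with the index of the taken jump
                   slot in the two low bits; a non-zero T0 therefore tells us
                   which direct jump to patch so the two TBs get chained */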
                if (T0 != 0) {
                    spin_lock(&tb_lock);
                    tb_add_jump((TranslationBlock *)(T0 & ~3), T0 & 3, tb);
                    spin_unlock(&tb_lock);
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#else
                gen_func();
#endif
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
            }
        } else {
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);

    /* restore global registers */
#ifdef reg_EAX
    EAX = saved_EAX;
#endif
#ifdef reg_ECX
    ECX = saved_ECX;
#endif
#ifdef reg_EDX
    EDX = saved_EDX;
#endif
#ifdef reg_EBX
    EBX = saved_EBX;
#endif
#ifdef reg_ESP
    ESP = saved_ESP;
#endif
#ifdef reg_EBP
    EBP = saved_EBP;
#endif
#ifdef reg_ESI
    ESI = saved_ESI;
#endif
#ifdef reg_EDI
    EDI = saved_EDI;
#endif
#elif defined(TARGET_ARM)
    env->cpsr = compute_cpsr();
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#else
#error unsupported target CPU
#endif
#ifdef __sparc__
    asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
#endif
    T0 = saved_T0;
    T1 = saved_T1;
    T2 = saved_T2;
    env = saved_env;
    return ret;
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (uint8_t *)(selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write,
                                   ((env->hflags & HF_CPL_MASK) == 3), 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(EXCP0E_PAGE, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set)
{
    /* XXX: do more */
    return 0;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set)
{
    /* XXX: locking issue */
    if (is_write && page_unprotect(address)) {
        return 1;
    }
    return 0;
}
#elif defined(TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set)
{
    TranslationBlock *tb;
    int ret;

#if 1
    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#endif
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_queue_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    }
    /* never comes here */
    return 1;
}
#else
#error unsupported target CPU
#endif

#if defined(__i386__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = uc->uc_mcontext.gregs[REG_EIP];
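    /* trap 0xe is the page fault exception; bit 1 of its error code is
       set when the faulting access was a write */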
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask);
}

#elif defined(__powerpc)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    struct pt_regs *regs = uc->uc_mcontext.regs;
    unsigned long pc;
    int is_write;

    pc = regs->nip;
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (regs->dsisr & 0x00800000)
        is_write = 1;
#else
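    /* trap 0x400 is an instruction access fault, which carries no DSISR
       information; otherwise the DSISR store bit (0x02000000) tells us
       whether the faulting access was a write */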
    if (regs->trap != 0x400 && (regs->dsisr & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
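    /* op field (bits 31-30) == 3 selects the load/store instruction format;
       the op3 field (bits 24-19) then identifies the store opcodes below */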
    if ((insn >> 30) == 3) {
        switch ((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask);
}

#else

#error host CPU specific signal handler needed

#endif