/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

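/* set (outside this file) when translation blocks are invalidated;
   cpu_exec() clears it before generating code and tests it afterwards
   so that a stale virtual pc hash chain pointer is never reused */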
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

#if defined(TARGET_ARM) || defined(TARGET_SPARC)
/* XXX: unify with i386 target */
void cpu_loop_exit(void)
{
    longjmp(env->jmp_env, 1);
}
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}

/* main execution loop */

int cpu_exec(CPUState *env1)
{
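    /* T0, T1 and T2 are the code generator's global temporaries; they are
       kept in fixed host registers, so their values are saved here and
       restored before returning */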
    int saved_T0, saved_T1, saved_T2;
    CPUState *saved_env;
#ifdef reg_EAX
    int saved_EAX;
#endif
#ifdef reg_ECX
    int saved_ECX;
#endif
#ifdef reg_EDX
    int saved_EDX;
#endif
#ifdef reg_EBX
    int saved_EBX;
#endif
#ifdef reg_ESP
    int saved_ESP;
#endif
#ifdef reg_EBP
    int saved_EBP;
#endif
#ifdef reg_ESI
    int saved_ESI;
#endif
#ifdef reg_EDI
    int saved_EDI;
#endif
#ifdef __sparc__
    int saved_i7, tmp_T0;
#endif
    int code_gen_size, ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb, **ptb;
    target_ulong cs_base, pc;
    uint8_t *tc_ptr;
    unsigned int flags;

    /* first we save global registers */
    saved_env = env;
    env = env1;
    saved_T0 = T0;
    saved_T1 = T1;
    saved_T2 = T2;
#ifdef __sparc__
    /* we also save i7 because longjmp may not restore it */
    asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
#endif

#if defined(TARGET_I386)
#ifdef reg_EAX
    saved_EAX = EAX;
#endif
#ifdef reg_ECX
    saved_ECX = ECX;
#endif
#ifdef reg_EDX
    saved_EDX = EDX;
#endif
#ifdef reg_EBX
    saved_EBX = EBX;
#endif
#ifdef reg_ESP
    saved_ESP = ESP;
#endif
#ifdef reg_EBP
    saved_EBP = EBP;
#endif
#ifdef reg_ESI
    saved_ESI = ESI;
#endif
#ifdef reg_EDI
    saved_EDI = EDI;
#endif

    env_to_regs();
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
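    /* DF is kept as +1 or -1 rather than as the raw eflags bit, so the
       string instruction helpers can simply add it to ESI/EDI */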
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
    {
        unsigned int psr;
        psr = env->cpsr;
        env->CF = (psr >> 29) & 1;
        env->NZF = (psr & 0xc0000000) ^ 0x40000000;
        env->VF = (psr << 3) & 0x80000000;
        env->QF = (psr >> 27) & 1;
        env->cpsr = psr & ~CACHED_CPSR_BITS;
    }
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index,
                                 0,
                                 env->error_code,
                                 env->exception_next_pc, 0);
#endif
                }
                env->exception_index = -1;
            }
            T0 = 0; /* force lookup of first TB */
            for(;;) {
#ifdef __sparc__
                /* g1 can be modified by some libc? functions */
                tmp_T0 = T0;
#endif
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)) {
#if defined(TARGET_I386)
                    /* if hardware interrupt pending, we execute it */
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->eflags & IF_MASK) &&
                        !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#ifdef __sparc__
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (msr_ee != 0) {
                        if ((interrupt_request & CPU_INTERRUPT_HARD)) {
                            /* Raise it */
                            env->exception_index = EXCP_EXTERNAL;
                            env->error_code = 0;
                            do_interrupt(env);
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        } else if ((interrupt_request & CPU_INTERRUPT_TIMER)) {
                            /* Raise it */
                            env->exception_index = EXCP_DECR;
                            env->error_code = 0;
                            do_interrupt(env);
                            env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env->interrupt_index, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#ifdef __sparc__
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
#if defined(TARGET_I386)
                    /* restore flags in standard format */
                    env->regs[R_EAX] = EAX;
                    env->regs[R_EBX] = EBX;
                    env->regs[R_ECX] = ECX;
                    env->regs[R_EDX] = EDX;
                    env->regs[R_ESI] = ESI;
                    env->regs[R_EDI] = EDI;
                    env->regs[R_EBP] = EBP;
                    env->regs[R_ESP] = ESP;
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    env->cpsr = compute_cpsr();
                    cpu_dump_state(env, logfile, fprintf, 0);
                    env->cpsr &= ~CACHED_CPSR_BITS;
#elif defined(TARGET_SPARC)
                    cpu_dump_state (env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                /* we record a subset of the CPU state. It will
                   always be the same before a given translated block
                   is executed. */
#if defined(TARGET_I386)
                flags = env->hflags;
                flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                cs_base = env->segs[R_CS].base;
                pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
                flags = env->thumb;
                cs_base = 0;
                pc = env->regs[15];
#elif defined(TARGET_SPARC)
                flags = 0;
                cs_base = env->npc;
                pc = env->pc;
#elif defined(TARGET_PPC)
                flags = 0;
                cs_base = 0;
                pc = env->nip;
#else
#error unsupported CPU
#endif
                tb = tb_find(&ptb, pc, cs_base,
                             flags);
                if (!tb) {
                    TranslationBlock **ptb1;
                    unsigned int h;
                    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

                    spin_lock(&tb_lock);

                    tb_invalidated_flag = 0;

                    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

                    /* find translated block using physical mappings */
                    phys_pc = get_phys_addr_code(env, pc);
                    phys_page1 = phys_pc & TARGET_PAGE_MASK;
                    phys_page2 = -1;
                    h = tb_phys_hash_func(phys_pc);
                    ptb1 = &tb_phys_hash[h];
                    for(;;) {
                        tb = *ptb1;
                        if (!tb)
                            goto not_found;
                        if (tb->pc == pc &&
                            tb->page_addr[0] == phys_page1 &&
                            tb->cs_base == cs_base &&
                            tb->flags == flags) {
                            /* check next page if needed */
                            if (tb->page_addr[1] != -1) {
                                virt_page2 = (pc & TARGET_PAGE_MASK) +
                                    TARGET_PAGE_SIZE;
                                phys_page2 = get_phys_addr_code(env, virt_page2);
                                if (tb->page_addr[1] == phys_page2)
                                    goto found;
                            } else {
                                goto found;
                            }
                        }
                        ptb1 = &tb->phys_hash_next;
                    }
                not_found:
                    /* if no translated code available, then translate it now */
                    tb = tb_alloc(pc);
                    if (!tb) {
                        /* flush must be done */
                        tb_flush(env);
                        /* cannot fail at this point */
                        tb = tb_alloc(pc);
                        /* don't forget to invalidate previous TB info */
                        ptb = &tb_hash[tb_hash_func(pc)];
                        T0 = 0;
                    }
                    tc_ptr = code_gen_ptr;
                    tb->tc_ptr = tc_ptr;
                    tb->cs_base = cs_base;
                    tb->flags = flags;
                    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
                    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

                    /* check next page if needed */
                    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
                    phys_page2 = -1;
                    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
                        phys_page2 = get_phys_addr_code(env, virt_page2);
                    }
                    tb_link_phys(tb, phys_pc, phys_page2);

                found:
                    if (tb_invalidated_flag) {
                        /* as some TB could have been invalidated because
                           of memory exceptions while generating the code, we
                           must recompute the hash index here */
                        ptb = &tb_hash[tb_hash_func(pc)];
                        while (*ptb != NULL)
                            ptb = &(*ptb)->hash_next;
                        T0 = 0;
                    }
                    /* we add the TB in the virtual pc hash table */
                    *ptb = tb;
                    tb->hash_next = NULL;
                    tb_link(tb);
                    spin_unlock(&tb_lock);
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
#ifdef __sparc__
                T0 = tmp_T0;
#endif
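                /* T0 is either 0 (forces the lookup above) or the address of
                   the TB we just left, with the index of the jump slot that
                   was taken encoded in its two low bits */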
                /* see if we can patch the calling TB. */
                {
                    if (T0 != 0
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
                        && (tb->cflags & CF_CODE_COPY) ==
                        (((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY)
#endif
                        ) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
#if defined(USE_CODE_COPY)
                        /* propagates the FP use info */
                        ((TranslationBlock *)(T0 & ~3))->cflags |=
                            (tb->cflags & CF_FP_USED);
#endif
                        spin_unlock(&tb_lock);
                    }
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(TARGET_I386) && defined(USE_CODE_COPY)
                {
                    if (!(tb->cflags & CF_CODE_COPY)) {
                        if ((tb->cflags & CF_FP_USED) && env->native_fp_regs) {
                            save_native_fp_state(env);
                        }
                        gen_func();
                    } else {
                        if ((tb->cflags & CF_FP_USED) && !env->native_fp_regs) {
                            restore_native_fp_state(env);
                        }
                        /* we work with native eflags */
                        CC_SRC = cc_table[CC_OP].compute_all();
                        CC_OP = CC_OP_EFLAGS;
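                        /* load the guest registers and eflags from the CPU
                           state block (the %fs-relative operands), jump to the
                           copied code, and at the exec_loop label store the
                           registers back and split eflags into cc_src and df
                           again */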
                        asm(".globl exec_loop\n"
                            "\n"
                            "debug1:\n"
                            "    pushl %%ebp\n"
                            "    fs movl %10, %9\n"
                            "    fs movl %11, %%eax\n"
                            "    andl $0x400, %%eax\n"
                            "    fs orl %8, %%eax\n"
                            "    pushl %%eax\n"
                            "    popf\n"
                            "    fs movl %%esp, %12\n"
                            "    fs movl %0, %%eax\n"
                            "    fs movl %1, %%ecx\n"
                            "    fs movl %2, %%edx\n"
                            "    fs movl %3, %%ebx\n"
                            "    fs movl %4, %%esp\n"
                            "    fs movl %5, %%ebp\n"
                            "    fs movl %6, %%esi\n"
                            "    fs movl %7, %%edi\n"
                            "    fs jmp *%9\n"
                            "exec_loop:\n"
                            "    fs movl %%esp, %4\n"
                            "    fs movl %12, %%esp\n"
                            "    fs movl %%eax, %0\n"
                            "    fs movl %%ecx, %1\n"
                            "    fs movl %%edx, %2\n"
                            "    fs movl %%ebx, %3\n"
                            "    fs movl %%ebp, %5\n"
                            "    fs movl %%esi, %6\n"
                            "    fs movl %%edi, %7\n"
                            "    pushf\n"
                            "    popl %%eax\n"
                            "    movl %%eax, %%ecx\n"
                            "    andl $0x400, %%ecx\n"
                            "    shrl $9, %%ecx\n"
                            "    andl $0x8d5, %%eax\n"
                            "    fs movl %%eax, %8\n"
                            "    movl $1, %%eax\n"
                            "    subl %%ecx, %%eax\n"
                            "    fs movl %%eax, %11\n"
                            "    fs movl %9, %%ebx\n" /* get T0 value */
                            "    popl %%ebp\n"
                            :
                            : "m" (*(uint8_t *)offsetof(CPUState, regs[0])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[1])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[2])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[3])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[4])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[5])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[6])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[7])),
                              "m" (*(uint8_t *)offsetof(CPUState, cc_src)),
                              "m" (*(uint8_t *)offsetof(CPUState, tmp0)),
                              "a" (gen_func),
                              "m" (*(uint8_t *)offsetof(CPUState, df)),
                              "m" (*(uint8_t *)offsetof(CPUState, saved_esp))
                            : "%ecx", "%edx"
                            );
                    }
                }
#else
                gen_func();
#endif
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
            }
        } else {
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
#if defined(USE_CODE_COPY)
    if (env->native_fp_regs) {
        save_native_fp_state(env);
    }
#endif
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);

    /* restore global registers */
#ifdef reg_EAX
    EAX = saved_EAX;
#endif
#ifdef reg_ECX
    ECX = saved_ECX;
#endif
#ifdef reg_EDX
    EDX = saved_EDX;
#endif
#ifdef reg_EBX
    EBX = saved_EBX;
#endif
#ifdef reg_ESP
    ESP = saved_ESP;
#endif
#ifdef reg_EBP
    EBP = saved_EBP;
#endif
#ifdef reg_ESI
    ESI = saved_ESI;
#endif
#ifdef reg_EDI
    EDI = saved_EDI;
#endif
#elif defined(TARGET_ARM)
    env->cpsr = compute_cpsr();
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#else
#error unsupported target CPU
#endif
#ifdef __sparc__
    asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
#endif
    T0 = saved_T0;
    T1 = saved_T1;
    T2 = saved_T2;
    env = saved_env;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

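/* user-mode segment load helper: in real or vm86 mode the segment base is
   simply 'selector << 4'; in protected mode the full segment descriptor
   load is performed */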
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave((target_ulong)ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor((target_ulong)ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write,
                                   ((env->hflags & HF_CPL_MASK) == 3), 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(EXCP0E_PAGE, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    return 0;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    return 0;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

#if 1
    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#endif
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(USE_CODE_COPY)
static void cpu_send_trap(unsigned long pc, int trap,
                          struct ucontext *uc)
{
    TranslationBlock *tb;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, uc);
    }
    sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    raise_exception_err(trap, env->error_code);
}
#endif

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = uc->uc_mcontext.gregs[REG_EIP];
    trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
    if (trapno == 0x00 || trapno == 0x05) {
        /* send division by zero or bound exception */
        cpu_send_trap(pc, trapno, uc);
        return 1;
    } else
#endif
        return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                                 trapno == 0xe ?
                                 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                                 &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)     REG_sig(gpr[reg_num], context)
# define IAR_sig(context)              REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)              REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)              REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)               REG_sig(link, context)  /* Link register */
# define CR_sig(context)               REG_sig(ccr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)   (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)            (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)              REG_sig(dar, context)
# define DSISR_sig(context)            REG_sig(dsisr, context)
# define TRAP_sig(context)             REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)        ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)   ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)   ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)     ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)         REG_sig(r##reg_num, context)
# define IAR_sig(context)                  REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                  REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                  REG_sig(ctr, context)
# define XER_sig(context)                  REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                   REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                   REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)       FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                  EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)                EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                 EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */