/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_M68K) || \
    defined(TARGET_ALPHA)
/* XXX: unify with i386 target */
void cpu_loop_exit(void)
{
    longjmp(env->jmp_env, 1);
}
#endif
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}

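/* Translation block (TB) lookup works on two levels: tb_find_fast()
   first probes the per-CPU virtual-PC jump cache (env->tb_jmp_cache);
   on a miss it falls back to tb_find_slow() below, which hashes the
   *physical* code address so that a block can be found (and later
   invalidated on writes) independently of the virtual mapping.  If no
   block exists yet, one is translated with cpu_gen_code() and linked
   into the physical hash table. */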
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      unsigned int flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    unsigned int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . MMU enabled . MMU no-fault . Supervisor
    flags = (env->psref << 3) | ((env->mmuregs[0] & (MMU_E | MMU_NF)) << 1)
        | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = (msr_pr << MSR_PR) | (msr_fp << MSR_FP) |
        (msr_se << MSR_SE) | (msr_le << MSR_LE);
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC;
#elif defined(TARGET_M68K)
    flags = env->fpcr & M68K_FPCR_PREC;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->sr & (SR_MD | SR_RB);
    cs_base = 0;         /* XXXXX */
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            T0 = 0;
        }
    }
    return tb;
}


/* main execution loop */

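/* Overall structure of cpu_exec(): host registers are saved, guest
   state is loaded into host registers, and setjmp() establishes the
   recovery point used by cpu_loop_exit()/longjmp() whenever an
   exception or an exit request interrupts translated code.  The inner
   loop then repeatedly services interrupt_request, looks up (or
   translates) the next TB with tb_find_fast(), optionally chains it
   to the previously executed TB, and jumps into the generated code. */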
int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    int saved_i7;
    target_ulong tmp_T0;
#endif
    int ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

#if defined(TARGET_I386)
|
251 |
/* handle exit of HALTED state */
|
252 |
if (env1->hflags & HF_HALTED_MASK) {
|
253 |
/* disable halt condition */
|
254 |
if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
|
255 |
(env1->eflags & IF_MASK)) { |
256 |
env1->hflags &= ~HF_HALTED_MASK; |
257 |
} else {
|
258 |
return EXCP_HALTED;
|
259 |
} |
260 |
} |
261 |
#elif defined(TARGET_PPC)
|
262 |
if (env1->halted) {
|
263 |
if (env1->msr[MSR_EE] &&
|
264 |
(env1->interrupt_request & CPU_INTERRUPT_HARD)) { |
265 |
env1->halted = 0;
|
266 |
} else {
|
267 |
return EXCP_HALTED;
|
268 |
} |
269 |
} |
270 |
#elif defined(TARGET_SPARC)
|
271 |
if (env1->halted) {
|
272 |
if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
|
273 |
(env1->psret != 0)) {
|
274 |
env1->halted = 0;
|
275 |
} else {
|
276 |
return EXCP_HALTED;
|
277 |
} |
278 |
} |
279 |
#elif defined(TARGET_ARM)
|
280 |
if (env1->halted) {
|
281 |
/* An interrupt wakes the CPU even if the I and F CPSR bits are
|
282 |
set. */
|
283 |
if (env1->interrupt_request
|
284 |
& (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD)) { |
285 |
env1->halted = 0;
|
286 |
} else {
|
287 |
return EXCP_HALTED;
|
288 |
} |
289 |
} |
290 |
#elif defined(TARGET_MIPS)
|
291 |
if (env1->halted) {
|
292 |
if (env1->interrupt_request &
|
293 |
(CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER)) { |
294 |
env1->halted = 0;
|
295 |
} else {
|
296 |
return EXCP_HALTED;
|
297 |
} |
298 |
} |
299 |
#elif defined(TARGET_ALPHA)
|
300 |
if (env1->halted) {
|
301 |
if (env1->interrupt_request & CPU_INTERRUPT_HARD) {
|
302 |
env1->halted = 0;
|
303 |
} else {
|
304 |
return EXCP_HALTED;
|
305 |
} |
306 |
} |
307 |
#endif
|

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    /* we also save i7 because longjmp may not restore it */
    asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
#endif

#if defined(TARGET_I386)
    env_to_regs();
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
    /* XXXXX */
#elif defined(TARGET_ALPHA)
    env_to_regs();
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
|
397 |
if (kqemu_is_ok(env) && env->interrupt_request == 0) { |
398 |
int ret;
|
399 |
env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK); |
400 |
ret = kqemu_cpu_exec(env); |
401 |
/* put eflags in CPU temporary format */
|
402 |
CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); |
403 |
DF = 1 - (2 * ((env->eflags >> 10) & 1)); |
404 |
CC_OP = CC_OP_EFLAGS; |
405 |
env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); |
406 |
if (ret == 1) { |
407 |
/* exception */
|
408 |
longjmp(env->jmp_env, 1);
|
409 |
} else if (ret == 2) { |
410 |
/* softmmu execution needed */
|
411 |
} else {
|
412 |
if (env->interrupt_request != 0) { |
413 |
/* hardware interrupt will be executed just after */
|
414 |
} else {
|
415 |
/* otherwise, we restart */
|
416 |
longjmp(env->jmp_env, 1);
|
417 |
} |
418 |
} |
419 |
} |
420 |
#endif
|
421 |
|
            T0 = 0; /* force lookup of first TB */
            for(;;) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                /* g1 can be modified by some libc? functions */
                tmp_T0 = T0;
#endif
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                               (env->eflags & IF_MASK) &&
                               !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (ppc_hw_interrupt(env) == 1) {
                            /* Some exception was raised */
                            if (env->pending_interrupts == 0)
                                env->interrupt_request &= ~CPU_INTERRUPT_HARD;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                            tmp_T0 = 0;
#else
                            T0 = 0;
#endif
                        }
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                            tmp_T0 = 0;
#else
                            T0 = 0;
#endif
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    } else if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_cpsr & CPSR_I)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                    }
#elif defined(TARGET_SH4)
                    /* XXXXX */
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
                    /* restore flags in standard format */
#ifdef reg_EAX
                    env->regs[R_EAX] = EAX;
#endif
#ifdef reg_EBX
                    env->regs[R_EBX] = EBX;
#endif
#ifdef reg_ECX
                    env->regs[R_ECX] = ECX;
#endif
#ifdef reg_EDX
                    env->regs[R_EDX] = EDX;
#endif
#ifdef reg_ESI
                    env->regs[R_ESI] = ESI;
#endif
#ifdef reg_EDI
                    env->regs[R_EDI] = EDI;
#endif
#ifdef reg_EBP
                    env->regs[R_EBP] = EBP;
#endif
#ifdef reg_ESP
                    env->regs[R_ESP] = ESP;
#endif
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                T0 = tmp_T0;
#endif
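                /* T0 carries the result of the previous TB: the address
                   of that TB with the taken-jump slot index encoded in
                   its low two bits (0 means "no previous TB").
                   tb_add_jump() below patches that jump slot so the
                   previous block branches straight into the new one,
                   bypassing this loop on the next pass. */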
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (T0 != 0 &&
#if USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
                        && (tb->cflags & CF_CODE_COPY) ==
                        (((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY)
#endif
                        ) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
#if defined(USE_CODE_COPY)
                        /* propagates the FP use info */
                        ((TranslationBlock *)(T0 & ~3))->cflags |=
                            (tb->cflags & CF_FP_USED);
#endif
                        spin_unlock(&tb_lock);
                    }
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "o0", "o1", "o2", "o3", "o4", "o5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(TARGET_I386) && defined(USE_CODE_COPY)
{
    if (!(tb->cflags & CF_CODE_COPY)) {
        if ((tb->cflags & CF_FP_USED) && env->native_fp_regs) {
            save_native_fp_state(env);
        }
        gen_func();
    } else {
        if ((tb->cflags & CF_FP_USED) && !env->native_fp_regs) {
            restore_native_fp_state(env);
        }
        /* we work with native eflags */
        CC_SRC = cc_table[CC_OP].compute_all();
        CC_OP = CC_OP_EFLAGS;
        asm(".globl exec_loop\n"
            "\n"
            "debug1:\n"
            "    pushl %%ebp\n"
            "    fs movl %10, %9\n"
            "    fs movl %11, %%eax\n"
            "    andl $0x400, %%eax\n"
            "    fs orl %8, %%eax\n"
            "    pushl %%eax\n"
            "    popf\n"
            "    fs movl %%esp, %12\n"
            "    fs movl %0, %%eax\n"
            "    fs movl %1, %%ecx\n"
            "    fs movl %2, %%edx\n"
            "    fs movl %3, %%ebx\n"
            "    fs movl %4, %%esp\n"
            "    fs movl %5, %%ebp\n"
            "    fs movl %6, %%esi\n"
            "    fs movl %7, %%edi\n"
            "    fs jmp *%9\n"
            "exec_loop:\n"
            "    fs movl %%esp, %4\n"
            "    fs movl %12, %%esp\n"
            "    fs movl %%eax, %0\n"
            "    fs movl %%ecx, %1\n"
            "    fs movl %%edx, %2\n"
            "    fs movl %%ebx, %3\n"
            "    fs movl %%ebp, %5\n"
            "    fs movl %%esi, %6\n"
            "    fs movl %%edi, %7\n"
            "    pushf\n"
            "    popl %%eax\n"
            "    movl %%eax, %%ecx\n"
            "    andl $0x400, %%ecx\n"
            "    shrl $9, %%ecx\n"
            "    andl $0x8d5, %%eax\n"
            "    fs movl %%eax, %8\n"
            "    movl $1, %%eax\n"
            "    subl %%ecx, %%eax\n"
            "    fs movl %%eax, %11\n"
            "    fs movl %9, %%ebx\n" /* get T0 value */
            "    popl %%ebp\n"
            :
            : "m" (*(uint8_t *)offsetof(CPUState, regs[0])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[1])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[2])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[3])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[4])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[5])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[6])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[7])),
              "m" (*(uint8_t *)offsetof(CPUState, cc_src)),
              "m" (*(uint8_t *)offsetof(CPUState, tmp0)),
              "a" (gen_func),
              "m" (*(uint8_t *)offsetof(CPUState, df)),
              "m" (*(uint8_t *)offsetof(CPUState, saved_esp))
            : "%ecx", "%edx"
            );
    }
}
#elif defined(__ia64)
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#else
                gen_func();
#endif
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            }
        } else {
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
#if defined(USE_CODE_COPY)
    if (env->native_fp_regs) {
        save_native_fp_state(env);
    }
#endif
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?. */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
#endif
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave((target_ulong)ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor((target_ulong)ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

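/* In user-mode emulation (no soft MMU), guest memory faults arrive as
   host SIGSEGV/SIGBUS.  Each target's handle_cpu_signal() first lets
   page_unprotect() deal with writes to pages holding translated code
   (self-modifying code), then asks the target MMU-fault handler whether
   the access is a guest-visible fault; if so, cpu_restore_state()
   rebuilds the precise guest CPU state from the host PC inside the TB
   and the guest exception is raised. */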
#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write,
                                   ((env->hflags & HF_CPL_MASK) == 3), 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#else
#error unsupported target CPU
#endif

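/* Host-side signal handlers: each host architecture below extracts the
   faulting program counter (and, where the hardware reports it, whether
   the access was a write) from the signal's siginfo/ucontext and
   forwards everything to handle_cpu_signal() above. */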
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

#if defined(USE_CODE_COPY)
static void cpu_send_trap(unsigned long pc, int trap,
                          struct ucontext *uc)
{
    TranslationBlock *tb;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, uc);
    }
    sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    raise_exception_err(trap, env->error_code);
}
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
    if (trapno == 0x00 || trapno == 0x05) {
        /* send division by zero or bound exception */
        cpu_send_trap(pc, trapno, uc);
        return 1;
    } else
#endif
        return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                                 trapno == 0xe ?
                                 (ERROR_sig(uc) >> 1) & 1 : 0,
                                 &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)                REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                 REG_sig(link, context)  /* Link register */
# define CR_sig(context)                 REG_sig(ccr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)              (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                REG_sig(dar, context)
# define DSISR_sig(context)              REG_sig(dsisr, context)
# define TRAP_sig(context)               REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)
# define XER_sig(context)                REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */