/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"
#include "qemu-barrier.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
#ifdef __ia64
        sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
#else
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#endif
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

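/* Slow-path TB lookup: the hash chain is keyed on the physical PC, so a
   translated block is shared by every virtual mapping of the same code
   page.  A block that spans a page boundary is only reused when both of
   its physical pages still match; a found block is moved to the head of
   its chain and cached in the per-virtual-PC tb_jmp_cache. */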
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

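/* Fast-path TB lookup: tb_jmp_cache is a direct-mapped cache indexed by a
   hash of the current virtual PC; on a miss or a CPU state mismatch we
   fall back to tb_find_slow(). */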
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

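/* Optional hook run when an EXCP_DEBUG exception terminates execution;
   a target or debugger front end can register a handler here (via
   cpu_set_debug_excp_handler) to do extra work, such as re-checking its
   own breakpoints, before the exception is reported. */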
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;

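/* Main execution loop: translate and run one TB after another until an
   exception or an exit request longjmp()s back to the setjmp() point
   below.  The return value is the exception index that ended the run
   (EXCP_INTERRUPT, EXCP_HLT, EXCP_DEBUG, ...). */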
int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (env1->halted) {
        if (!cpu_has_work(env1)) {
            return EXCP_HALTED;
        }

        env1->halted = 0;
    }

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it.  */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    barrier();
    env = env1;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_LM32)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                    env->exception_index = -1;
#endif
                }
            }

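            /* Inner loop: service pending interrupts, find (or translate)
               the next TB and execute it, chaining directly from one block
               to the next whenever possible. */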
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
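                /* next_tb packs the address of the TB that just ran with the
                   index of the jump slot it exited through in its two low
                   bits; tb_add_jump() below uses them to patch that slot so
                   the two blocks chain directly on the next run. */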
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
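                    /* A return value with 2 in the low bits means the
                       instruction-count (icount) budget ran out inside the
                       TB: either top up the 16-bit decrementer from
                       icount_extra and keep going, or run the remaining
                       instructions with cpu_exec_nocache() and leave the
                       loop. */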
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    barrier();
    env = (void *) saved_env_reg;

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

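/* The helpers below are only compiled for the user-mode i386 emulation;
   they temporarily switch the global env pointer to the given CPU state
   before calling the corresponding TCG helpers. */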
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif

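/* User-mode only: host SIGSEGV/SIGBUS handling.  A write fault on a page
   that was write-protected because it holds translated code is resolved by
   page_unprotect() (this is how guest self-modifying code is caught);
   anything else is turned into a guest MMU fault or CPU exception. */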
/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}

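/* Per-host-architecture cpu_signal_handler() implementations.  Each one
   extracts the faulting PC and the saved signal mask from the host signal
   context and, where the kernel does not report it directly, guesses
   is_write by decoding the instruction at the faulting PC. */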
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long *)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
# define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)   ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)   (*((unsigned long *)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)  ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)   ((context)->sc_eip)
# define TRAP_sig(context)  ((context)->sc_trapno)
# define ERROR_sig(context) ((context)->sc_err)
# define MASK_sig(context)  ((context)->sc_mask)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)  ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)    _UC_MACHINE_PC(context)
#define TRAP_sig(context)  ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)    ((context)->sc_rip)
#define TRAP_sig(context)  ((context)->sc_trapno)
#define ERROR_sig(context) ((context)->sc_err)
#define MASK_sig(context)  ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)    (*((unsigned long *)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)  ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)  ((context)->uc_sigmask)
#else
#define PC_sig(context)    ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)  ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)     REG_sig(gpr[reg_num], context)
# define IAR_sig(context)              REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)              REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)              REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)               REG_sig(link, context) /* Link register */
# define CR_sig(context)               REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)   (((double *)((char *)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)            (*(int *)((char *)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)              REG_sig(dar, context)
# define DSISR_sig(context)            REG_sig(dsisr, context)
# define TRAP_sig(context)             REG_sig(trap, context)
#endif /* linux */

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <ucontext.h>
# define IAR_sig(context)              ((context)->uc_mcontext.mc_srr0)
# define MSR_sig(context)              ((context)->uc_mcontext.mc_srr1)
# define CTR_sig(context)              ((context)->uc_mcontext.mc_ctr)
# define XER_sig(context)              ((context)->uc_mcontext.mc_xer)
# define LR_sig(context)               ((context)->uc_mcontext.mc_lr)
# define CR_sig(context)               ((context)->uc_mcontext.mc_cr)
/* Exception Registers access */
# define DAR_sig(context)              ((context)->uc_mcontext.mc_dar)
# define DSISR_sig(context)            ((context)->uc_mcontext.mc_dsisr)
# define TRAP_sig(context)             ((context)->uc_mcontext.mc_exc)
#endif /* __FreeBSD__ || __FreeBSD_kernel__ */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)  ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)  ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)    ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)        REG_sig(r##reg_num, context)
# define IAR_sig(context)                 REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                 REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                 REG_sig(ctr, context)
# define XER_sig(context)                 REG_sig(xer, context)  /* Link register */
# define LR_sig(context)                  REG_sig(lr, context)   /* User's integer exception register */
# define CR_sig(context)                  REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)      FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)               ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                 EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)               EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             (sigset_t *)&uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    uint16_t *pinsn;
    int is_write = 0;

    pc = uc->uc_mcontext.psw.addr;

    /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
       of the normal 2 arguments.  The 3rd argument contains the "int_code"
       from the hardware which does in fact contain the is_write value.
       The rt signal handler, as far as I can tell, does not give this value
       at all.  Not that we could get to it from here even if it were.  */
    /* ??? This is not even close to complete, since it ignores all
       of the read-modify-write instructions.  */
    pinsn = (uint16_t *)pc;
    switch (pinsn[0] >> 8) {
    case 0x50: /* ST */
    case 0x42: /* STC */
    case 0x40: /* STH */
        is_write = 1;
        break;
    case 0xc4: /* RIL format insns */
        switch (pinsn[0] & 0xf) {
        case 0xf: /* STRL */
        case 0xb: /* STGRL */
        case 0x7: /* STHRL */
            is_write = 1;
        }
        break;
    case 0xe3: /* RXY format insns */
        switch (pinsn[2] & 0xff) {
        case 0x50: /* STY */
        case 0x24: /* STG */
        case 0x72: /* STCY */
        case 0x70: /* STHY */
        case 0x8e: /* STPQ */
        case 0x3f: /* STRVH */
        case 0x3e: /* STRV */
        case 0x2f: /* STRVG */
            is_write = 1;
        }
        break;
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc = uc->uc_mcontext.sc_iaoq[0];
    uint32_t insn = *(uint32_t *)pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster.  */
    switch (insn >> 26) {
    case 0x1a: /* STW */
    case 0x19: /* STH */
    case 0x18: /* STB */
    case 0x1b: /* STWM */
        is_write = 1;
        break;

    case 0x09: /* CSTWX, FSTWX, FSTWS */
    case 0x0b: /* CSTDX, FSTDX, FSTDS */
        /* Distinguish from coprocessor load ... */
        is_write = (insn >> 9) & 1;
        break;

    case 0x03:
        switch ((insn >> 6) & 15) {
        case 0xa: /* STWS */
        case 0x9: /* STHS */
        case 0x8: /* STBS */
        case 0xe: /* STWAS */
        case 0xc: /* STBYS */
            is_write = 1;
        }
        break;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */