/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

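/* Note: cpu_loop_exit() unwinds straight back to the setjmp() in cpu_exec().
   Callers typically set env->exception_index first, so the pending exception
   is picked up at the top of the main execution loop instead of returning
   through the normal call chain. */
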
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

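/* Note on tcg_qemu_tb_exec(): the value it returns is the pointer of the
   TranslationBlock that was executing, with the index of the jump slot it
   exited through packed into the two low bits.  The special value 2 in those
   bits marks an early exit (e.g. the instruction counter expired), which is
   why callers test (next_tb & 3) == 2 and restore the PC from the TB. */
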
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

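/* Note: the slow lookup is keyed on the *physical* address of the code
   (tb_phys_hash is indexed with phys_pc), so translations are shared across
   virtual mappings and can be invalidated per physical page.  For a TB that
   crosses a page boundary, the physical address of the second page is
   checked as well before a cached translation is reused. */
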
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

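/* Note: tb_jmp_cache acts as a small direct-mapped cache indexed by the
   virtual PC, sitting in front of the physical hash table walked by
   tb_find_slow().  A cached entry is only reused when pc, cs_base and flags
   all match, since unrelated translations can hash to the same slot. */
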
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

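    /* Note: hostregs_helper.h is included three times in this function:
       with DECLARE_HOST_REGS to declare the saved copies, with SAVE_HOST_REGS
       to save the host registers that hold globals such as env, and once more
       at the end to restore them.  On i386 the guest eflags are also kept in
       a decomposed "lazy" form (CC_SRC/CC_OP plus DF) while translated code
       runs, and are recombined with helper_cc_compute_all() on exit. */
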
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
#endif
                }
                env->exception_index = -1;
            }

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

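            /* Exceptions raised while a TB is executing longjmp() back to
               the setjmp() above; the pending env->exception_index is then
               delivered on the next pass through this outer loop, either via
               the target's do_interrupt() (system emulation) or back to the
               caller (user mode). */
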
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_interrupts_enabled(env)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
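                /* At this point any interrupt accepted above has been
                   delivered by the target's do_interrupt(), and next_tb was
                   reset to 0 so the interrupted block will not be chained to
                   the new program flow. */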
#ifdef CONFIG_DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 && tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;

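                /* Note: tb_add_jump() above patches jump slot (next_tb & 3)
                   of the previously executed TB (next_tb & ~3) so that it
                   branches directly into the new TB, bypassing this lookup
                   loop on later passes.  Chaining is skipped when the new TB
                   spans two pages, as the comment above explains. */
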
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */


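    /* Note on the icount handling in the loop above: translated code
       decrements the 16-bit low half of icount_decr, while icount_extra
       holds instructions that did not fit into it.  When the counter
       expires, the loop either refills the decrementer from icount_extra or,
       for the last few instructions, runs them with cpu_exec_nocache() and
       leaves with EXCP_INTERRUPT. */
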
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it can lead to an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}

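/* Note: the handler above tries three things in order: page_unprotect(),
   which handles writes to pages that QEMU write-protected to detect
   self-modifying code; cpu_handle_mmu_fault(), which lets the guest MMU
   resolve or report the access; and finally, for a real guest fault,
   cpu_restore_state() rebuilds the guest CPU state from the faulting host PC
   before EXCEPTION_ACTION delivers the exception. */
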
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
# define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)   ((context)->sc_eip)
# define TRAP_sig(context)  ((context)->sc_trapno)
# define ERROR_sig(context) ((context)->sc_err)
# define MASK_sig(context)  ((context)->sc_mask)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)  ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)     _UC_MACHINE_PC(context)
#define TRAP_sig(context)   ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)  ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)   ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)     ((context)->sc_rip)
#define TRAP_sig(context)   ((context)->sc_trapno)
#define ERROR_sig(context)  ((context)->sc_err)
#define MASK_sig(context)   ((context)->sc_mask)
#else
#define PC_sig(context)     ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)   ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)  ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)   ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)        REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                 REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)                 REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)                 REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                 REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                  REG_sig(link, context)  /* Link register */
# define CR_sig(context)                  REG_sig(ccr, context)   /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)      (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)               (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                 REG_sig(dar, context)
# define DSISR_sig(context)               REG_sig(dsisr, context)
# define TRAP_sig(context)                REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)  ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)  ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)    ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)        REG_sig(r##reg_num, context)
# define IAR_sig(context)                 REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                 REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                 REG_sig(ctr, context)
# define XER_sig(context)                 REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                  REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                  REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)      FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)               ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                 EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)               EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */