Statistics
| Branch: | Revision:

root / cpu-exec.c @ dcfd14b3

History | View | Annotate | Download (44.1 kB)

1
/*
2
 *  i386 emulator main execution loop
3
 *
4
 *  Copyright (c) 2003-2005 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include "config.h"
20
#include "exec.h"
21
#include "disas.h"
22
#include "tcg.h"
23
#include "kvm.h"
24
#include "qemu-barrier.h"
25

    
26
#if !defined(CONFIG_SOFTMMU)
27
#undef EAX
28
#undef ECX
29
#undef EDX
30
#undef EBX
31
#undef ESP
32
#undef EBP
33
#undef ESI
34
#undef EDI
35
#undef EIP
36
#include <signal.h>
37
#ifdef __linux__
38
#include <sys/ucontext.h>
39
#endif
40
#endif
41

    
42
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
43
// Work around ugly bugs in glibc that mangle global register contents
44
#undef env
45
#define env cpu_single_env
46
#endif
47

    
48
int tb_invalidated_flag;
49

    
50
//#define CONFIG_DEBUG_EXEC
51
//#define DEBUG_SIGNAL
52

    
53
/* Report whether the given CPU has pending work to do; thin public
   wrapper around the per-target cpu_has_work() predicate. */
int qemu_cpu_has_work(CPUState *env)
{
    int pending = cpu_has_work(env);

    return pending;
}
57

    
58
/* Abandon execution of the current translation block and longjmp back
   to the setjmp point inside cpu_exec().  Does not return.  Must only
   be called while cpu_exec() is active (i.e. env->jmp_env is set). */
void cpu_loop_exit(void)
{
    /* Clear current_tb so nothing believes a TB is still running.  */
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}
63

    
64
/* exit the current TB from a signal handler. The host registers are
65
   restored in a state compatible with the CPU emulator
66
 */
67
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    /* 'puc' is the ucontext passed to the host signal handler;
       presumably NULL when not called from a signal context — the
       if (puc) test below tolerates that. */
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    /* Reload the (possibly register-allocated) global env from the
       caller-supplied CPU state. */
    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* We longjmp out of the signal handler instead of returning
           through sigreturn, so the signal mask saved in the ucontext
           must be restored by hand first. */
        /* XXX: use siglongjmp ? */
#ifdef __linux__
#ifdef __ia64
        sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
#else
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#endif
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    /* No exception pending: jump straight back into the main loop. */
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
98

    
99
/* Execute the code without caching the generated code. An interpreter
100
   could be used if available. */
101
/* Translate and execute at most 'max_cycles' instructions starting at
   orig_tb's pc, then immediately throw the generated code away.  Used
   when the icount decrementer expires mid-TB and only the remaining
   instructions of the block must be run.  An interpreter could be used
   instead if one were available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    /* Generate a fresh, cycle-limited TB with the same guest state key
       (pc/cs_base/flags) as the original. */
    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    /* One-shot TB: invalidate and free it now that it has run. */
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
126

    
127
/* Slow-path TB lookup: search the physical-address hash table for a
   translation matching (pc, cs_base, flags); translate a new TB if
   none exists.  On success the TB is moved to the head of its hash
   chain and installed in env's virtual-pc jump cache.  Sets the global
   tb_invalidated_flag as a side effect of tb_gen_code (checked by the
   caller in cpu_exec). */
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    /* ptb1 tracks the link pointing at the current candidate so the
       match can later be unlinked and re-inserted at the head. */
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                /* TB spans two pages: the second physical page must
                   also match the current mapping. */
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
   /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        /* *ptb1 is NULL on the not_found path (freshly generated TBs
           are already linked by tb_gen_code), so only pre-existing
           matches are re-ordered here. */
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
180

    
181
static inline TranslationBlock *tb_find_fast(void)
182
{
183
    TranslationBlock *tb;
184
    target_ulong cs_base, pc;
185
    int flags;
186

    
187
    /* we record a subset of the CPU state. It will
188
       always be the same before a given translated block
189
       is executed. */
190
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
191
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
192
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
193
                 tb->flags != flags)) {
194
        tb = tb_find_slow(pc, cs_base, flags);
195
    }
196
    return tb;
197
}
198

    
199
static CPUDebugExcpHandler *debug_excp_handler;
200

    
201
/* Install a new debug exception handler and return the previously
   installed one so the caller can chain to it or restore it later. */
CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *previous = debug_excp_handler;

    debug_excp_handler = handler;
    return previous;
}
208

    
209
/* Post-process an EXCP_DEBUG exit: if the stop was not caused by a
   watchpoint, clear any stale BP_WATCHPOINT_HIT flags, then invoke the
   registered debug exception handler (if any). */
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}
222

    
223
/* main execution loop */
224

    
225
volatile sig_atomic_t exit_request;
226

    
227
/* Main CPU execution loop.  Repeatedly delivers pending exceptions and
   interrupts, looks up (or translates) the next translation block, and
   runs it, until something forces an exit from the loop.  Returns the
   exception index that caused the exit (>= EXCP_INTERRUPT), or
   EXCP_HALTED immediately if the CPU is halted with no pending work. */
int cpu_exec(CPUState *env1)
{
    /* Saved copy of the global register variable 'env' (volatile so it
       survives the longjmp from cpu_loop_exit). */
    volatile host_reg_t saved_env_reg;
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    /* Return value of the last executed TB: pointer to the TB with the
       two low bits encoding the exit reason ((next_tb & 3) == 2 means
       the instruction counter expired — see below). */
    unsigned long next_tb;

    if (env1->halted) {
        if (!cpu_has_work(env1)) {
            return EXCP_HALTED;
        }

        env1->halted = 0;
    }

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it.  */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    barrier();
    env = env1;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            /* longjmp may clobber the global register holding 'env'
               (glibc bug workaround) — reload it from cpu_single_env. */
            env = cpu_single_env;
#define env cpu_single_env
#endif
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_LM32)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_UNICORE32)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_S390X)
                    do_interrupt(env);
#endif
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                            svm_check_intercept(SVM_EXIT_INIT);
                            do_cpu_init(env);
                            env->exception_index = EXCP_HALTED;
                            cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                            do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        /* Interrupt delivery below is gated on the SVM
                           global interrupt flag (GIF). */
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                   /* Don't use the cached interrupt_request value,
                      do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    barrier();
    env = (void *) saved_env_reg;

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
706

    
707
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
708

    
709
/* Load an x86 segment register for user-mode emulation.  In real mode
   or vm86 mode the segment cache is filled directly from the selector;
   in protected mode the full descriptor load is delegated to
   helper_load_seg().  Temporarily redirects the global 'env' at 's'
   because the helpers operate on the implicit global CPU state. */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        /* Real/vm86 mode: base = selector << 4, limit 0xffff. */
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}
724

    
725
/* Store the FPU state of 's' to guest memory at 'ptr' (x86 FSAVE).
   'data32' selects the 32-bit vs 16-bit layout — assumed, based on the
   helper's signature; confirm against helper_fsave().  The global
   'env' is swapped in because the helper uses implicit CPU state. */
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}
736

    
737
/* Restore the FPU state of 's' from guest memory at 'ptr' (x86 FRSTOR),
   the counterpart of cpu_x86_fsave() above.  The global 'env' is
   swapped in because the helper uses implicit CPU state. */
void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}
748

    
749
#endif /* TARGET_I386 */
750

    
751
#if !defined(CONFIG_SOFTMMU)
752

    
753
#if defined(TARGET_I386)
754
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
755
#else
756
#define EXCEPTION_ACTION cpu_loop_exit()
757
#endif
758

    
759
/* 'pc' is the host PC at which the exception was raised. 'address' is
760
   the effective address of the memory exception. 'is_write' is 1 if a
761
   write caused the exception and otherwise 0'. 'old_set' is the
762
   signal set which should be restored */
763
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    /* Returns 1 when the fault was handled (or converted into a guest
       exception, in which case EXCEPTION_ACTION does not return), and
       0 when it was not an MMU fault and the caller should treat the
       signal as a genuine host crash. */
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        /* Write to a page QEMU protected to track self-modifying code:
           unprotecting it is enough; retry the faulting access. */
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    /* Raise the guest exception; longjmps back into cpu_exec(). */
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}
#if defined(__i386__)

/* Per-OS accessors for the fields of the signal context we need:
   faulting EIP, trap number, page-fault error code and saved signal
   mask. */
#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif
838

    
839
int cpu_signal_handler(int host_signum, void *pinfo,
840
                       void *puc)
841
{
842
    siginfo_t *info = pinfo;
843
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
844
    ucontext_t *uc = puc;
845
#elif defined(__OpenBSD__)
846
    struct sigcontext *uc = puc;
847
#else
848
    struct ucontext *uc = puc;
849
#endif
850
    unsigned long pc;
851
    int trapno;
852

    
853
#ifndef REG_EIP
854
/* for glibc 2.1 */
855
#define REG_EIP    EIP
856
#define REG_ERR    ERR
857
#define REG_TRAPNO TRAPNO
858
#endif
859
    pc = EIP_sig(uc);
860
    trapno = TRAP_sig(uc);
861
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
862
                             trapno == 0xe ?
863
                             (ERROR_sig(uc) >> 1) & 1 : 0,
864
                             &MASK_sig(uc), puc);
865
}
#elif defined(__x86_64__)

/* Per-OS accessors for the x86_64 signal context fields. */
#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif
892

    
893
int cpu_signal_handler(int host_signum, void *pinfo,
894
                       void *puc)
895
{
896
    siginfo_t *info = pinfo;
897
    unsigned long pc;
898
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
899
    ucontext_t *uc = puc;
900
#elif defined(__OpenBSD__)
901
    struct sigcontext *uc = puc;
902
#else
903
    struct ucontext *uc = puc;
904
#endif
905

    
906
    pc = PC_sig(uc);
907
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
908
                             TRAP_sig(uc) == 0xe ?
909
                             (ERROR_sig(uc) >> 1) & 1 : 0,
910
                             &MASK_sig(uc), puc);
911
}
#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context)  /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)   /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <ucontext.h>
# define IAR_sig(context)               ((context)->uc_mcontext.mc_srr0)
# define MSR_sig(context)               ((context)->uc_mcontext.mc_srr1)
# define CTR_sig(context)               ((context)->uc_mcontext.mc_ctr)
# define XER_sig(context)               ((context)->uc_mcontext.mc_xer)
# define LR_sig(context)                ((context)->uc_mcontext.mc_lr)
# define CR_sig(context)                ((context)->uc_mcontext.mc_cr)
/* Exception Registers access */
# define DAR_sig(context)               ((context)->uc_mcontext.mc_dar)
# define DSISR_sig(context)             ((context)->uc_mcontext.mc_dsisr)
# define TRAP_sig(context)              ((context)->uc_mcontext.mc_exc)
#endif /* __FreeBSD__|| __FreeBSD_kernel__ */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)  ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
977

    
978
int cpu_signal_handler(int host_signum, void *pinfo,
979
                       void *puc)
980
{
981
    siginfo_t *info = pinfo;
982
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
983
    ucontext_t *uc = puc;
984
#else
985
    struct ucontext *uc = puc;
986
#endif
987
    unsigned long pc;
988
    int is_write;
989

    
990
    pc = IAR_sig(uc);
991
    is_write = 0;
992
#if 0
993
    /* ppc 4xx case */
994
    if (DSISR_sig(uc) & 0x00800000)
995
        is_write = 1;
996
#else
997
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
998
        is_write = 1;
999
#endif
1000
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1001
                             is_write, &uc->uc_sigmask, puc);
1002
}
1003

    
1004
#elif defined(__alpha__)
1005

    
1006
int cpu_signal_handler(int host_signum, void *pinfo,
1007
                           void *puc)
1008
{
1009
    siginfo_t *info = pinfo;
1010
    struct ucontext *uc = puc;
1011
    uint32_t *pc = uc->uc_mcontext.sc_pc;
1012
    uint32_t insn = *pc;
1013
    int is_write = 0;
1014

    
1015
    /* XXX: need kernel patch to get write flag faster */
1016
    switch (insn >> 26) {
1017
    case 0x0d: // stw
1018
    case 0x0e: // stb
1019
    case 0x0f: // stq_u
1020
    case 0x24: // stf
1021
    case 0x25: // stg
1022
    case 0x26: // sts
1023
    case 0x27: // stt
1024
    case 0x2c: // stl
1025
    case 0x2d: // stq
1026
    case 0x2e: // stl_c
1027
    case 0x2f: // stq_c
1028
        is_write = 1;
1029
    }
1030

    
1031
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1032
                             is_write, &uc->uc_sigmask, puc);
1033
}
1034
#elif defined(__sparc__)
1035

    
1036
int cpu_signal_handler(int host_signum, void *pinfo,
1037
                       void *puc)
1038
{
1039
    siginfo_t *info = pinfo;
1040
    int is_write;
1041
    uint32_t insn;
1042
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
1043
    uint32_t *regs = (uint32_t *)(info + 1);
1044
    void *sigmask = (regs + 20);
1045
    /* XXX: is there a standard glibc define ? */
1046
    unsigned long pc = regs[1];
1047
#else
1048
#ifdef __linux__
1049
    struct sigcontext *sc = puc;
1050
    unsigned long pc = sc->sigc_regs.tpc;
1051
    void *sigmask = (void *)sc->sigc_mask;
1052
#elif defined(__OpenBSD__)
1053
    struct sigcontext *uc = puc;
1054
    unsigned long pc = uc->sc_pc;
1055
    void *sigmask = (void *)(long)uc->sc_mask;
1056
#endif
1057
#endif
1058

    
1059
    /* XXX: need kernel patch to get write flag faster */
1060
    is_write = 0;
1061
    insn = *(uint32_t *)pc;
1062
    if ((insn >> 30) == 3) {
1063
      switch((insn >> 19) & 0x3f) {
1064
      case 0x05: // stb
1065
      case 0x15: // stba
1066
      case 0x06: // sth
1067
      case 0x16: // stha
1068
      case 0x04: // st
1069
      case 0x14: // sta
1070
      case 0x07: // std
1071
      case 0x17: // stda
1072
      case 0x0e: // stx
1073
      case 0x1e: // stxa
1074
      case 0x24: // stf
1075
      case 0x34: // stfa
1076
      case 0x27: // stdf
1077
      case 0x37: // stdfa
1078
      case 0x26: // stqf
1079
      case 0x36: // stqfa
1080
      case 0x25: // stfsr
1081
      case 0x3c: // casa
1082
      case 0x3e: // casxa
1083
        is_write = 1;
1084
        break;
1085
      }
1086
    }
1087
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1088
                             is_write, sigmask, NULL);
1089
}
1090

    
1091
#elif defined(__arm__)
1092

    
1093
int cpu_signal_handler(int host_signum, void *pinfo,
1094
                       void *puc)
1095
{
1096
    siginfo_t *info = pinfo;
1097
    struct ucontext *uc = puc;
1098
    unsigned long pc;
1099
    int is_write;
1100

    
1101
#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
1102
    pc = uc->uc_mcontext.gregs[R15];
1103
#else
1104
    pc = uc->uc_mcontext.arm_pc;
1105
#endif
1106
    /* XXX: compute is_write */
1107
    is_write = 0;
1108
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1109
                             is_write,
1110
                             &uc->uc_sigmask, puc);
1111
}
1112

    
1113
#elif defined(__mc68000)
1114

    
1115
int cpu_signal_handler(int host_signum, void *pinfo,
1116
                       void *puc)
1117
{
1118
    siginfo_t *info = pinfo;
1119
    struct ucontext *uc = puc;
1120
    unsigned long pc;
1121
    int is_write;
1122

    
1123
    pc = uc->uc_mcontext.gregs[16];
1124
    /* XXX: compute is_write */
1125
    is_write = 0;
1126
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1127
                             is_write,
1128
                             &uc->uc_sigmask, puc);
1129
}
1130

    
1131
#elif defined(__ia64)
1132

    
1133
#ifndef __ISR_VALID
1134
  /* This ought to be in <bits/siginfo.h>... */
1135
# define __ISR_VALID        1
1136
#endif
1137

    
1138
int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
1139
{
1140
    siginfo_t *info = pinfo;
1141
    struct ucontext *uc = puc;
1142
    unsigned long ip;
1143
    int is_write = 0;
1144

    
1145
    ip = uc->uc_mcontext.sc_ip;
1146
    switch (host_signum) {
1147
      case SIGILL:
1148
      case SIGFPE:
1149
      case SIGSEGV:
1150
      case SIGBUS:
1151
      case SIGTRAP:
1152
          if (info->si_code && (info->si_segvflags & __ISR_VALID))
1153
              /* ISR.W (write-access) is bit 33:  */
1154
              is_write = (info->si_isr >> 33) & 1;
1155
          break;
1156

    
1157
      default:
1158
          break;
1159
    }
1160
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
1161
                             is_write,
1162
                             (sigset_t *)&uc->uc_sigmask, puc);
1163
}
1164

    
1165
#elif defined(__s390__)
1166

    
1167
int cpu_signal_handler(int host_signum, void *pinfo,
1168
                       void *puc)
1169
{
1170
    siginfo_t *info = pinfo;
1171
    struct ucontext *uc = puc;
1172
    unsigned long pc;
1173
    uint16_t *pinsn;
1174
    int is_write = 0;
1175

    
1176
    pc = uc->uc_mcontext.psw.addr;
1177

    
1178
    /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
1179
       of the normal 2 arguments.  The 3rd argument contains the "int_code"
1180
       from the hardware which does in fact contain the is_write value.
1181
       The rt signal handler, as far as I can tell, does not give this value
1182
       at all.  Not that we could get to it from here even if it were.  */
1183
    /* ??? This is not even close to complete, since it ignores all
1184
       of the read-modify-write instructions.  */
1185
    pinsn = (uint16_t *)pc;
1186
    switch (pinsn[0] >> 8) {
1187
    case 0x50: /* ST */
1188
    case 0x42: /* STC */
1189
    case 0x40: /* STH */
1190
        is_write = 1;
1191
        break;
1192
    case 0xc4: /* RIL format insns */
1193
        switch (pinsn[0] & 0xf) {
1194
        case 0xf: /* STRL */
1195
        case 0xb: /* STGRL */
1196
        case 0x7: /* STHRL */
1197
            is_write = 1;
1198
        }
1199
        break;
1200
    case 0xe3: /* RXY format insns */
1201
        switch (pinsn[2] & 0xff) {
1202
        case 0x50: /* STY */
1203
        case 0x24: /* STG */
1204
        case 0x72: /* STCY */
1205
        case 0x70: /* STHY */
1206
        case 0x8e: /* STPQ */
1207
        case 0x3f: /* STRVH */
1208
        case 0x3e: /* STRV */
1209
        case 0x2f: /* STRVG */
1210
            is_write = 1;
1211
        }
1212
        break;
1213
    }
1214
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1215
                             is_write, &uc->uc_sigmask, puc);
1216
}
1217

    
1218
#elif defined(__mips__)
1219

    
1220
int cpu_signal_handler(int host_signum, void *pinfo,
1221
                       void *puc)
1222
{
1223
    siginfo_t *info = pinfo;
1224
    struct ucontext *uc = puc;
1225
    greg_t pc = uc->uc_mcontext.pc;
1226
    int is_write;
1227

    
1228
    /* XXX: compute is_write */
1229
    is_write = 0;
1230
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1231
                             is_write, &uc->uc_sigmask, puc);
1232
}
1233

    
1234
#elif defined(__hppa__)
1235

    
1236
int cpu_signal_handler(int host_signum, void *pinfo,
1237
                       void *puc)
1238
{
1239
    struct siginfo *info = pinfo;
1240
    struct ucontext *uc = puc;
1241
    unsigned long pc = uc->uc_mcontext.sc_iaoq[0];
1242
    uint32_t insn = *(uint32_t *)pc;
1243
    int is_write = 0;
1244

    
1245
    /* XXX: need kernel patch to get write flag faster.  */
1246
    switch (insn >> 26) {
1247
    case 0x1a: /* STW */
1248
    case 0x19: /* STH */
1249
    case 0x18: /* STB */
1250
    case 0x1b: /* STWM */
1251
        is_write = 1;
1252
        break;
1253

    
1254
    case 0x09: /* CSTWX, FSTWX, FSTWS */
1255
    case 0x0b: /* CSTDX, FSTDX, FSTDS */
1256
        /* Distinguish from coprocessor load ... */
1257
        is_write = (insn >> 9) & 1;
1258
        break;
1259

    
1260
    case 0x03:
1261
        switch ((insn >> 6) & 15) {
1262
        case 0xa: /* STWS */
1263
        case 0x9: /* STHS */
1264
        case 0x8: /* STBS */
1265
        case 0xe: /* STWAS */
1266
        case 0xc: /* STBYS */
1267
            is_write = 1;
1268
        }
1269
        break;
1270
    }
1271

    
1272
    return handle_cpu_signal(pc, (unsigned long)info->si_addr, 
1273
                             is_write, &uc->uc_sigmask, puc);
1274
}
#else

/* No handler for this host architecture: fail the build rather than
   silently miscompile user-mode emulation. */
#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */