Statistics
| Branch: | Revision:

root / cpu-exec.c @ 1f56e32a

History | View | Annotate | Download (44.5 kB)

1
/*
2
 *  i386 emulator main execution loop
3
 *
4
 *  Copyright (c) 2003-2005 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include "config.h"
20
#include "exec.h"
21
#include "disas.h"
22
#include "tcg.h"
23
#include "kvm.h"
24
#include "qemu-barrier.h"
25

    
26
#if !defined(CONFIG_SOFTMMU)
27
#undef EAX
28
#undef ECX
29
#undef EDX
30
#undef EBX
31
#undef ESP
32
#undef EBP
33
#undef ESI
34
#undef EDI
35
#undef EIP
36
#include <signal.h>
37
#ifdef __linux__
38
#include <sys/ucontext.h>
39
#endif
40
#endif
41

    
42
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
43
// Work around ugly bugs in glibc that mangle global register contents
44
#undef env
45
#define env cpu_single_env
46
#endif
47

    
48
int tb_invalidated_flag;
49

    
50
//#define CONFIG_DEBUG_EXEC
51
//#define DEBUG_SIGNAL
52

    
53
int qemu_cpu_has_work(CPUState *env)
54
{
55
    return cpu_has_work(env);
56
}
57

    
58
void cpu_loop_exit(void)
59
{
60
    env->current_tb = NULL;
61
    longjmp(env->jmp_env, 1);
62
}
63

    
64
/* exit the current TB from a signal handler. The host registers are
65
   restored in a state compatible with the CPU emulator
66
 */
67
void cpu_resume_from_signal(CPUState *env1, void *puc)
68
{
69
#if !defined(CONFIG_SOFTMMU)
70
#ifdef __linux__
71
    struct ucontext *uc = puc;
72
#elif defined(__OpenBSD__)
73
    struct sigcontext *uc = puc;
74
#endif
75
#endif
76

    
77
    env = env1;
78

    
79
    /* XXX: restore cpu registers saved in host registers */
80

    
81
#if !defined(CONFIG_SOFTMMU)
82
    if (puc) {
83
        /* XXX: use siglongjmp ? */
84
#ifdef __linux__
85
#ifdef __ia64
86
        sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
87
#else
88
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
89
#endif
90
#elif defined(__OpenBSD__)
91
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
92
#endif
93
    }
94
#endif
95
    env->exception_index = -1;
96
    longjmp(env->jmp_env, 1);
97
}
98

    
99
/* Execute the code without caching the generated code. An interpreter
100
   could be used if available. */
101
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
102
{
103
    unsigned long next_tb;
104
    TranslationBlock *tb;
105

    
106
    /* Should never happen.
107
       We only end up here when an existing TB is too long.  */
108
    if (max_cycles > CF_COUNT_MASK)
109
        max_cycles = CF_COUNT_MASK;
110

    
111
    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
112
                     max_cycles);
113
    env->current_tb = tb;
114
    /* execute the generated code */
115
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
116
    env->current_tb = NULL;
117

    
118
    if ((next_tb & 3) == 2) {
119
        /* Restore PC.  This may happen if async event occurs before
120
           the TB starts executing.  */
121
        cpu_pc_from_tb(env, tb);
122
    }
123
    tb_phys_invalidate(tb, -1);
124
    tb_free(tb);
125
}
126

    
127
static TranslationBlock *tb_find_slow(target_ulong pc,
128
                                      target_ulong cs_base,
129
                                      uint64_t flags)
130
{
131
    TranslationBlock *tb, **ptb1;
132
    unsigned int h;
133
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
134
    target_ulong virt_page2;
135

    
136
    tb_invalidated_flag = 0;
137

    
138
    /* find translated block using physical mappings */
139
    phys_pc = get_page_addr_code(env, pc);
140
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
141
    phys_page2 = -1;
142
    h = tb_phys_hash_func(phys_pc);
143
    ptb1 = &tb_phys_hash[h];
144
    for(;;) {
145
        tb = *ptb1;
146
        if (!tb)
147
            goto not_found;
148
        if (tb->pc == pc &&
149
            tb->page_addr[0] == phys_page1 &&
150
            tb->cs_base == cs_base &&
151
            tb->flags == flags) {
152
            /* check next page if needed */
153
            if (tb->page_addr[1] != -1) {
154
                virt_page2 = (pc & TARGET_PAGE_MASK) +
155
                    TARGET_PAGE_SIZE;
156
                phys_page2 = get_page_addr_code(env, virt_page2);
157
                if (tb->page_addr[1] == phys_page2)
158
                    goto found;
159
            } else {
160
                goto found;
161
            }
162
        }
163
        ptb1 = &tb->phys_hash_next;
164
    }
165
 not_found:
166
   /* if no translated code available, then translate it now */
167
    tb = tb_gen_code(env, pc, cs_base, flags, 0);
168

    
169
 found:
170
    /* Move the last found TB to the head of the list */
171
    if (likely(*ptb1)) {
172
        *ptb1 = tb->phys_hash_next;
173
        tb->phys_hash_next = tb_phys_hash[h];
174
        tb_phys_hash[h] = tb;
175
    }
176
    /* we add the TB in the virtual pc hash table */
177
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
178
    return tb;
179
}
180

    
181
static inline TranslationBlock *tb_find_fast(void)
182
{
183
    TranslationBlock *tb;
184
    target_ulong cs_base, pc;
185
    int flags;
186

    
187
    /* we record a subset of the CPU state. It will
188
       always be the same before a given translated block
189
       is executed. */
190
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
191
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
192
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
193
                 tb->flags != flags)) {
194
        tb = tb_find_slow(pc, cs_base, flags);
195
    }
196
    return tb;
197
}
198

    
199
static CPUDebugExcpHandler *debug_excp_handler;
200

    
201
CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
202
{
203
    CPUDebugExcpHandler *old_handler = debug_excp_handler;
204

    
205
    debug_excp_handler = handler;
206
    return old_handler;
207
}
208

    
209
static void cpu_handle_debug_exception(CPUState *env)
210
{
211
    CPUWatchpoint *wp;
212

    
213
    if (!env->watchpoint_hit) {
214
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
215
            wp->flags &= ~BP_WATCHPOINT_HIT;
216
        }
217
    }
218
    if (debug_excp_handler) {
219
        debug_excp_handler(env);
220
    }
221
}
222

    
223
/* main execution loop */
224

    
225
volatile sig_atomic_t exit_request;
226

    
227
int cpu_exec(CPUState *env1)
228
{
229
    volatile host_reg_t saved_env_reg;
230
    int ret, interrupt_request;
231
    TranslationBlock *tb;
232
    uint8_t *tc_ptr;
233
    unsigned long next_tb;
234

    
235
    if (env1->halted) {
236
        if (!cpu_has_work(env1)) {
237
            return EXCP_HALTED;
238
        }
239

    
240
        env1->halted = 0;
241
    }
242

    
243
    cpu_single_env = env1;
244

    
245
    /* the access to env below is actually saving the global register's
246
       value, so that files not including target-xyz/exec.h are free to
247
       use it.  */
248
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
249
    saved_env_reg = (host_reg_t) env;
250
    barrier();
251
    env = env1;
252

    
253
    if (unlikely(exit_request)) {
254
        env->exit_request = 1;
255
    }
256

    
257
#if defined(TARGET_I386)
258
    /* put eflags in CPU temporary format */
259
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
260
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
261
    CC_OP = CC_OP_EFLAGS;
262
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
263
#elif defined(TARGET_SPARC)
264
#elif defined(TARGET_M68K)
265
    env->cc_op = CC_OP_FLAGS;
266
    env->cc_dest = env->sr & 0xf;
267
    env->cc_x = (env->sr >> 4) & 1;
268
#elif defined(TARGET_ALPHA)
269
#elif defined(TARGET_ARM)
270
#elif defined(TARGET_UNICORE32)
271
#elif defined(TARGET_PPC)
272
#elif defined(TARGET_LM32)
273
#elif defined(TARGET_MICROBLAZE)
274
#elif defined(TARGET_MIPS)
275
#elif defined(TARGET_SH4)
276
#elif defined(TARGET_CRIS)
277
#elif defined(TARGET_S390X)
278
    /* XXXXX */
279
#else
280
#error unsupported target CPU
281
#endif
282
    env->exception_index = -1;
283

    
284
    /* prepare setjmp context for exception handling */
285
    for(;;) {
286
        if (setjmp(env->jmp_env) == 0) {
287
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
288
#undef env
289
            env = cpu_single_env;
290
#define env cpu_single_env
291
#endif
292
            /* if an exception is pending, we execute it here */
293
            if (env->exception_index >= 0) {
294
                if (env->exception_index >= EXCP_INTERRUPT) {
295
                    /* exit request from the cpu execution loop */
296
                    ret = env->exception_index;
297
                    if (ret == EXCP_DEBUG) {
298
                        cpu_handle_debug_exception(env);
299
                    }
300
                    break;
301
                } else {
302
#if defined(CONFIG_USER_ONLY)
303
                    /* if user mode only, we simulate a fake exception
304
                       which will be handled outside the cpu execution
305
                       loop */
306
#if defined(TARGET_I386)
307
                    do_interrupt_user(env->exception_index,
308
                                      env->exception_is_int,
309
                                      env->error_code,
310
                                      env->exception_next_eip);
311
                    /* successfully delivered */
312
                    env->old_exception = -1;
313
#endif
314
                    ret = env->exception_index;
315
                    break;
316
#else
317
#if defined(TARGET_I386)
318
                    /* simulate a real cpu exception. On i386, it can
319
                       trigger new exceptions, but we do not handle
320
                       double or triple faults yet. */
321
                    do_interrupt(env->exception_index,
322
                                 env->exception_is_int,
323
                                 env->error_code,
324
                                 env->exception_next_eip, 0);
325
                    /* successfully delivered */
326
                    env->old_exception = -1;
327
#elif defined(TARGET_PPC)
328
                    do_interrupt(env);
329
#elif defined(TARGET_LM32)
330
                    do_interrupt(env);
331
#elif defined(TARGET_MICROBLAZE)
332
                    do_interrupt(env);
333
#elif defined(TARGET_MIPS)
334
                    do_interrupt(env);
335
#elif defined(TARGET_SPARC)
336
                    do_interrupt(env);
337
#elif defined(TARGET_ARM)
338
                    do_interrupt(env);
339
#elif defined(TARGET_UNICORE32)
340
                    do_interrupt(env);
341
#elif defined(TARGET_SH4)
342
                    do_interrupt(env);
343
#elif defined(TARGET_ALPHA)
344
                    do_interrupt(env);
345
#elif defined(TARGET_CRIS)
346
                    do_interrupt(env);
347
#elif defined(TARGET_M68K)
348
                    do_interrupt(0);
349
#elif defined(TARGET_S390X)
350
                    do_interrupt(env);
351
#endif
352
                    env->exception_index = -1;
353
#endif
354
                }
355
            }
356

    
357
            next_tb = 0; /* force lookup of first TB */
358
            for(;;) {
359
                interrupt_request = env->interrupt_request;
360
                if (unlikely(interrupt_request)) {
361
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
362
                        /* Mask out external interrupts for this step. */
363
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
364
                    }
365
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
366
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
367
                        env->exception_index = EXCP_DEBUG;
368
                        cpu_loop_exit();
369
                    }
370
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
371
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
372
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
373
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
374
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
375
                        env->halted = 1;
376
                        env->exception_index = EXCP_HLT;
377
                        cpu_loop_exit();
378
                    }
379
#endif
380
#if defined(TARGET_I386)
381
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
382
                            svm_check_intercept(SVM_EXIT_INIT);
383
                            do_cpu_init(env);
384
                            env->exception_index = EXCP_HALTED;
385
                            cpu_loop_exit();
386
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
387
                            do_cpu_sipi(env);
388
                    } else if (env->hflags2 & HF2_GIF_MASK) {
389
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
390
                            !(env->hflags & HF_SMM_MASK)) {
391
                            svm_check_intercept(SVM_EXIT_SMI);
392
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
393
                            do_smm_enter();
394
                            next_tb = 0;
395
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
396
                                   !(env->hflags2 & HF2_NMI_MASK)) {
397
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
398
                            env->hflags2 |= HF2_NMI_MASK;
399
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
400
                            next_tb = 0;
401
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
402
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
403
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
404
                            next_tb = 0;
405
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
406
                                   (((env->hflags2 & HF2_VINTR_MASK) && 
407
                                     (env->hflags2 & HF2_HIF_MASK)) ||
408
                                    (!(env->hflags2 & HF2_VINTR_MASK) && 
409
                                     (env->eflags & IF_MASK && 
410
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
411
                            int intno;
412
                            svm_check_intercept(SVM_EXIT_INTR);
413
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
414
                            intno = cpu_get_pic_interrupt(env);
415
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
416
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
417
#undef env
418
                    env = cpu_single_env;
419
#define env cpu_single_env
420
#endif
421
                            do_interrupt(intno, 0, 0, 0, 1);
422
                            /* ensure that no TB jump will be modified as
423
                               the program flow was changed */
424
                            next_tb = 0;
425
#if !defined(CONFIG_USER_ONLY)
426
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
427
                                   (env->eflags & IF_MASK) && 
428
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
429
                            int intno;
430
                            /* FIXME: this should respect TPR */
431
                            svm_check_intercept(SVM_EXIT_VINTR);
432
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
433
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
434
                            do_interrupt(intno, 0, 0, 0, 1);
435
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
436
                            next_tb = 0;
437
#endif
438
                        }
439
                    }
440
#elif defined(TARGET_PPC)
441
#if 0
442
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
443
                        cpu_reset(env);
444
                    }
445
#endif
446
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
447
                        ppc_hw_interrupt(env);
448
                        if (env->pending_interrupts == 0)
449
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
450
                        next_tb = 0;
451
                    }
452
#elif defined(TARGET_LM32)
453
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
454
                        && (env->ie & IE_IE)) {
455
                        env->exception_index = EXCP_IRQ;
456
                        do_interrupt(env);
457
                        next_tb = 0;
458
                    }
459
#elif defined(TARGET_MICROBLAZE)
460
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
461
                        && (env->sregs[SR_MSR] & MSR_IE)
462
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
463
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
464
                        env->exception_index = EXCP_IRQ;
465
                        do_interrupt(env);
466
                        next_tb = 0;
467
                    }
468
#elif defined(TARGET_MIPS)
469
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
470
                        cpu_mips_hw_interrupts_pending(env)) {
471
                        /* Raise it */
472
                        env->exception_index = EXCP_EXT_INTERRUPT;
473
                        env->error_code = 0;
474
                        do_interrupt(env);
475
                        next_tb = 0;
476
                    }
477
#elif defined(TARGET_SPARC)
478
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
479
                        if (cpu_interrupts_enabled(env) &&
480
                            env->interrupt_index > 0) {
481
                            int pil = env->interrupt_index & 0xf;
482
                            int type = env->interrupt_index & 0xf0;
483

    
484
                            if (((type == TT_EXTINT) &&
485
                                  cpu_pil_allowed(env, pil)) ||
486
                                  type != TT_EXTINT) {
487
                                env->exception_index = env->interrupt_index;
488
                                do_interrupt(env);
489
                                next_tb = 0;
490
                            }
491
                        }
492
                    }
493
#elif defined(TARGET_ARM)
494
                    if (interrupt_request & CPU_INTERRUPT_FIQ
495
                        && !(env->uncached_cpsr & CPSR_F)) {
496
                        env->exception_index = EXCP_FIQ;
497
                        do_interrupt(env);
498
                        next_tb = 0;
499
                    }
500
                    /* ARMv7-M interrupt return works by loading a magic value
501
                       into the PC.  On real hardware the load causes the
502
                       return to occur.  The qemu implementation performs the
503
                       jump normally, then does the exception return when the
504
                       CPU tries to execute code at the magic address.
505
                       This will cause the magic PC value to be pushed to
506
                       the stack if an interrupt occurred at the wrong time.
507
                       We avoid this by disabling interrupts when
508
                       pc contains a magic address.  */
509
                    if (interrupt_request & CPU_INTERRUPT_HARD
510
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
511
                            || !(env->uncached_cpsr & CPSR_I))) {
512
                        env->exception_index = EXCP_IRQ;
513
                        do_interrupt(env);
514
                        next_tb = 0;
515
                    }
516
#elif defined(TARGET_UNICORE32)
517
                    if (interrupt_request & CPU_INTERRUPT_HARD
518
                        && !(env->uncached_asr & ASR_I)) {
519
                        do_interrupt(env);
520
                        next_tb = 0;
521
                    }
522
#elif defined(TARGET_SH4)
523
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
524
                        do_interrupt(env);
525
                        next_tb = 0;
526
                    }
527
#elif defined(TARGET_ALPHA)
528
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
529
                        do_interrupt(env);
530
                        next_tb = 0;
531
                    }
532
#elif defined(TARGET_CRIS)
533
                    if (interrupt_request & CPU_INTERRUPT_HARD
534
                        && (env->pregs[PR_CCS] & I_FLAG)
535
                        && !env->locked_irq) {
536
                        env->exception_index = EXCP_IRQ;
537
                        do_interrupt(env);
538
                        next_tb = 0;
539
                    }
540
                    if (interrupt_request & CPU_INTERRUPT_NMI
541
                        && (env->pregs[PR_CCS] & M_FLAG)) {
542
                        env->exception_index = EXCP_NMI;
543
                        do_interrupt(env);
544
                        next_tb = 0;
545
                    }
546
#elif defined(TARGET_M68K)
547
                    if (interrupt_request & CPU_INTERRUPT_HARD
548
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
549
                            < env->pending_level) {
550
                        /* Real hardware gets the interrupt vector via an
551
                           IACK cycle at this point.  Current emulated
552
                           hardware doesn't rely on this, so we
553
                           provide/save the vector when the interrupt is
554
                           first signalled.  */
555
                        env->exception_index = env->pending_vector;
556
                        do_interrupt(1);
557
                        next_tb = 0;
558
                    }
559
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
560
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
561
                        (env->psw.mask & PSW_MASK_EXT)) {
562
                        do_interrupt(env);
563
                        next_tb = 0;
564
                    }
565
#endif
566
                   /* Don't use the cached interrupt_request value,
567
                      do_interrupt may have updated the EXITTB flag. */
568
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
569
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
570
                        /* ensure that no TB jump will be modified as
571
                           the program flow was changed */
572
                        next_tb = 0;
573
                    }
574
                }
575
                if (unlikely(env->exit_request)) {
576
                    env->exit_request = 0;
577
                    env->exception_index = EXCP_INTERRUPT;
578
                    cpu_loop_exit();
579
                }
580
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
581
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
582
                    /* restore flags in standard format */
583
#if defined(TARGET_I386)
584
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
585
                    log_cpu_state(env, X86_DUMP_CCOP);
586
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
587
#elif defined(TARGET_M68K)
588
                    cpu_m68k_flush_flags(env, env->cc_op);
589
                    env->cc_op = CC_OP_FLAGS;
590
                    env->sr = (env->sr & 0xffe0)
591
                              | env->cc_dest | (env->cc_x << 4);
592
                    log_cpu_state(env, 0);
593
#else
594
                    log_cpu_state(env, 0);
595
#endif
596
                }
597
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
598
                spin_lock(&tb_lock);
599
                tb = tb_find_fast();
600
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
601
                   doing it in tb_find_slow */
602
                if (tb_invalidated_flag) {
603
                    /* as some TB could have been invalidated because
604
                       of memory exceptions while generating the code, we
605
                       must recompute the hash index here */
606
                    next_tb = 0;
607
                    tb_invalidated_flag = 0;
608
                }
609
#ifdef CONFIG_DEBUG_EXEC
610
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
611
                             (long)tb->tc_ptr, tb->pc,
612
                             lookup_symbol(tb->pc));
613
#endif
614
                /* see if we can patch the calling TB. When the TB
615
                   spans two pages, we cannot safely do a direct
616
                   jump. */
617
                if (next_tb != 0 && tb->page_addr[1] == -1) {
618
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
619
                }
620
                spin_unlock(&tb_lock);
621

    
622
                /* cpu_interrupt might be called while translating the
623
                   TB, but before it is linked into a potentially
624
                   infinite loop and becomes env->current_tb. Avoid
625
                   starting execution if there is a pending interrupt. */
626
                env->current_tb = tb;
627
                barrier();
628
                if (likely(!env->exit_request)) {
629
                    tc_ptr = tb->tc_ptr;
630
                /* execute the generated code */
631
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
632
#undef env
633
                    env = cpu_single_env;
634
#define env cpu_single_env
635
#endif
636
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
637
                    if ((next_tb & 3) == 2) {
638
                        /* Instruction counter expired.  */
639
                        int insns_left;
640
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
641
                        /* Restore PC.  */
642
                        cpu_pc_from_tb(env, tb);
643
                        insns_left = env->icount_decr.u32;
644
                        if (env->icount_extra && insns_left >= 0) {
645
                            /* Refill decrementer and continue execution.  */
646
                            env->icount_extra += insns_left;
647
                            if (env->icount_extra > 0xffff) {
648
                                insns_left = 0xffff;
649
                            } else {
650
                                insns_left = env->icount_extra;
651
                            }
652
                            env->icount_extra -= insns_left;
653
                            env->icount_decr.u16.low = insns_left;
654
                        } else {
655
                            if (insns_left > 0) {
656
                                /* Execute remaining instructions.  */
657
                                cpu_exec_nocache(insns_left, tb);
658
                            }
659
                            env->exception_index = EXCP_INTERRUPT;
660
                            next_tb = 0;
661
                            cpu_loop_exit();
662
                        }
663
                    }
664
                }
665
                env->current_tb = NULL;
666
                /* reset soft MMU for next block (it can currently
667
                   only be set by a memory fault) */
668
            } /* for(;;) */
669
        }
670
    } /* for(;;) */
671

    
672

    
673
#if defined(TARGET_I386)
674
    /* restore flags in standard format */
675
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
676
#elif defined(TARGET_ARM)
677
    /* XXX: Save/restore host fpu exception state?.  */
678
#elif defined(TARGET_UNICORE32)
679
#elif defined(TARGET_SPARC)
680
#elif defined(TARGET_PPC)
681
#elif defined(TARGET_LM32)
682
#elif defined(TARGET_M68K)
683
    cpu_m68k_flush_flags(env, env->cc_op);
684
    env->cc_op = CC_OP_FLAGS;
685
    env->sr = (env->sr & 0xffe0)
686
              | env->cc_dest | (env->cc_x << 4);
687
#elif defined(TARGET_MICROBLAZE)
688
#elif defined(TARGET_MIPS)
689
#elif defined(TARGET_SH4)
690
#elif defined(TARGET_ALPHA)
691
#elif defined(TARGET_CRIS)
692
#elif defined(TARGET_S390X)
693
    /* XXXXX */
694
#else
695
#error unsupported target CPU
696
#endif
697

    
698
    /* restore global registers */
699
    barrier();
700
    env = (void *) saved_env_reg;
701

    
702
    /* fail safe : never use cpu_single_env outside cpu_exec() */
703
    cpu_single_env = NULL;
704
    return ret;
705
}
706

    
707
/* must only be called from the generated code as an exception can be
708
   generated */
709
void tb_invalidate_page_range(target_ulong start, target_ulong end)
710
{
711
    /* XXX: cannot enable it yet because it yields to MMU exception
712
       where NIP != read address on PowerPC */
713
#if 0
714
    target_ulong phys_addr;
715
    phys_addr = get_phys_addr_code(env, start);
716
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
717
#endif
718
}
719

    
720
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
721

    
722
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
723
{
724
    CPUX86State *saved_env;
725

    
726
    saved_env = env;
727
    env = s;
728
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
729
        selector &= 0xffff;
730
        cpu_x86_load_seg_cache(env, seg_reg, selector,
731
                               (selector << 4), 0xffff, 0);
732
    } else {
733
        helper_load_seg(seg_reg, selector);
734
    }
735
    env = saved_env;
736
}
737

    
738
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
739
{
740
    CPUX86State *saved_env;
741

    
742
    saved_env = env;
743
    env = s;
744

    
745
    helper_fsave(ptr, data32);
746

    
747
    env = saved_env;
748
}
749

    
750
void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
751
{
752
    CPUX86State *saved_env;
753

    
754
    saved_env = env;
755
    env = s;
756

    
757
    helper_frstor(ptr, data32);
758

    
759
    env = saved_env;
760
}
761

    
762
#endif /* TARGET_I386 */
763

    
764
#if !defined(CONFIG_SOFTMMU)
765

    
766
#if defined(TARGET_I386)
767
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
768
#else
769
#define EXCEPTION_ACTION cpu_loop_exit()
770
#endif
771

    
772
/* 'pc' is the host PC at which the exception was raised. 'address' is
773
   the effective address of the memory exception. 'is_write' is 1 if a
774
   write caused the exception and otherwise 0'. 'old_set' is the
775
   signal set which should be restored */
776
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
777
                                    int is_write, sigset_t *old_set,
778
                                    void *puc)
779
{
780
    TranslationBlock *tb;
781
    int ret;
782

    
783
    if (cpu_single_env)
784
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
785
#if defined(DEBUG_SIGNAL)
786
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
787
                pc, address, is_write, *(unsigned long *)old_set);
788
#endif
789
    /* XXX: locking issue */
790
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
791
        return 1;
792
    }
793

    
794
    /* see if it is an MMU fault */
795
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
796
    if (ret < 0)
797
        return 0; /* not an MMU fault */
798
    if (ret == 0)
799
        return 1; /* the MMU fault was handled without causing real CPU fault */
800
    /* now we have a real cpu fault */
801
    tb = tb_find_pc(pc);
802
    if (tb) {
803
        /* the PC is inside the translated code. It means that we have
804
           a virtual CPU fault */
805
        cpu_restore_state(tb, env, pc);
806
    }
807

    
808
    /* we restore the process signal mask as the sigreturn should
809
       do it (XXX: use sigsetjmp) */
810
    sigprocmask(SIG_SETMASK, old_set, NULL);
811
    EXCEPTION_ACTION;
812

    
813
    /* never comes here */
814
    return 1;
815
}
816

    
817
#if defined(__i386__)
818

    
819
#if defined(__APPLE__)
820
# include <sys/ucontext.h>
821

    
822
# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
823
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
824
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
825
# define MASK_sig(context)    ((context)->uc_sigmask)
826
#elif defined (__NetBSD__)
827
# include <ucontext.h>
828

    
829
# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
830
# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
831
# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
832
# define MASK_sig(context)    ((context)->uc_sigmask)
833
#elif defined (__FreeBSD__) || defined(__DragonFly__)
834
# include <ucontext.h>
835

    
836
# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
837
# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
838
# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
839
# define MASK_sig(context)    ((context)->uc_sigmask)
840
#elif defined(__OpenBSD__)
841
# define EIP_sig(context)     ((context)->sc_eip)
842
# define TRAP_sig(context)    ((context)->sc_trapno)
843
# define ERROR_sig(context)   ((context)->sc_err)
844
# define MASK_sig(context)    ((context)->sc_mask)
845
#else
846
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
847
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
848
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
849
# define MASK_sig(context)    ((context)->uc_sigmask)
850
#endif
851

    
852
int cpu_signal_handler(int host_signum, void *pinfo,
853
                       void *puc)
854
{
855
    siginfo_t *info = pinfo;
856
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
857
    ucontext_t *uc = puc;
858
#elif defined(__OpenBSD__)
859
    struct sigcontext *uc = puc;
860
#else
861
    struct ucontext *uc = puc;
862
#endif
863
    unsigned long pc;
864
    int trapno;
865

    
866
#ifndef REG_EIP
867
/* for glibc 2.1 */
868
#define REG_EIP    EIP
869
#define REG_ERR    ERR
870
#define REG_TRAPNO TRAPNO
871
#endif
872
    pc = EIP_sig(uc);
873
    trapno = TRAP_sig(uc);
874
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
875
                             trapno == 0xe ?
876
                             (ERROR_sig(uc) >> 1) & 1 : 0,
877
                             &MASK_sig(uc), puc);
878
}
879

    
880
#elif defined(__x86_64__)
881

    
882
#ifdef __NetBSD__
883
#define PC_sig(context)       _UC_MACHINE_PC(context)
884
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
885
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
886
#define MASK_sig(context)     ((context)->uc_sigmask)
887
#elif defined(__OpenBSD__)
888
#define PC_sig(context)       ((context)->sc_rip)
889
#define TRAP_sig(context)     ((context)->sc_trapno)
890
#define ERROR_sig(context)    ((context)->sc_err)
891
#define MASK_sig(context)     ((context)->sc_mask)
892
#elif defined (__FreeBSD__) || defined(__DragonFly__)
893
#include <ucontext.h>
894

    
895
#define PC_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
896
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
897
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
898
#define MASK_sig(context)     ((context)->uc_sigmask)
899
#else
900
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
901
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
902
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
903
#define MASK_sig(context)     ((context)->uc_sigmask)
904
#endif
905

    
906
int cpu_signal_handler(int host_signum, void *pinfo,
907
                       void *puc)
908
{
909
    siginfo_t *info = pinfo;
910
    unsigned long pc;
911
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
912
    ucontext_t *uc = puc;
913
#elif defined(__OpenBSD__)
914
    struct sigcontext *uc = puc;
915
#else
916
    struct ucontext *uc = puc;
917
#endif
918

    
919
    pc = PC_sig(uc);
920
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
921
                             TRAP_sig(uc) == 0xe ?
922
                             (ERROR_sig(uc) >> 1) & 1 : 0,
923
                             &MASK_sig(uc), puc);
924
}
925

    
926
#elif defined(_ARCH_PPC)
927

    
928
/***********************************************************************
929
 * signal context platform-specific definitions
930
 * From Wine
931
 */
932
#ifdef linux
933
/* All Registers access - only for local access */
934
# define REG_sig(reg_name, context)                ((context)->uc_mcontext.regs->reg_name)
935
/* Gpr Registers access  */
936
# define GPR_sig(reg_num, context)                REG_sig(gpr[reg_num], context)
937
# define IAR_sig(context)                        REG_sig(nip, context)        /* Program counter */
938
# define MSR_sig(context)                        REG_sig(msr, context)   /* Machine State Register (Supervisor) */
939
# define CTR_sig(context)                        REG_sig(ctr, context)   /* Count register */
940
# define XER_sig(context)                        REG_sig(xer, context) /* User's integer exception register */
941
# define LR_sig(context)                        REG_sig(link, context) /* Link register */
942
# define CR_sig(context)                        REG_sig(ccr, context) /* Condition register */
943
/* Float Registers access  */
944
# define FLOAT_sig(reg_num, context)                (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
945
# define FPSCR_sig(context)                        (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
946
/* Exception Registers access */
947
# define DAR_sig(context)                        REG_sig(dar, context)
948
# define DSISR_sig(context)                        REG_sig(dsisr, context)
949
# define TRAP_sig(context)                        REG_sig(trap, context)
950
#endif /* linux */
951

    
952
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
953
#include <ucontext.h>
954
# define IAR_sig(context)                ((context)->uc_mcontext.mc_srr0)
955
# define MSR_sig(context)                ((context)->uc_mcontext.mc_srr1)
956
# define CTR_sig(context)                ((context)->uc_mcontext.mc_ctr)
957
# define XER_sig(context)                ((context)->uc_mcontext.mc_xer)
958
# define LR_sig(context)                ((context)->uc_mcontext.mc_lr)
959
# define CR_sig(context)                ((context)->uc_mcontext.mc_cr)
960
/* Exception Registers access */
961
# define DAR_sig(context)                ((context)->uc_mcontext.mc_dar)
962
# define DSISR_sig(context)                ((context)->uc_mcontext.mc_dsisr)
963
# define TRAP_sig(context)                ((context)->uc_mcontext.mc_exc)
964
#endif /* __FreeBSD__|| __FreeBSD_kernel__ */
965

    
966
#ifdef __APPLE__
967
# include <sys/ucontext.h>
968
typedef struct ucontext SIGCONTEXT;
969
/* All Registers access - only for local access */
970
# define REG_sig(reg_name, context)                ((context)->uc_mcontext->ss.reg_name)
971
# define FLOATREG_sig(reg_name, context)        ((context)->uc_mcontext->fs.reg_name)
972
# define EXCEPREG_sig(reg_name, context)        ((context)->uc_mcontext->es.reg_name)
973
# define VECREG_sig(reg_name, context)                ((context)->uc_mcontext->vs.reg_name)
974
/* Gpr Registers access */
975
# define GPR_sig(reg_num, context)                REG_sig(r##reg_num, context)
976
# define IAR_sig(context)                        REG_sig(srr0, context)        /* Program counter */
977
# define MSR_sig(context)                        REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
978
# define CTR_sig(context)                        REG_sig(ctr, context)
979
# define XER_sig(context)                        REG_sig(xer, context) /* Link register */
980
# define LR_sig(context)                        REG_sig(lr, context)  /* User's integer exception register */
981
# define CR_sig(context)                        REG_sig(cr, context)  /* Condition register */
982
/* Float Registers access */
983
# define FLOAT_sig(reg_num, context)                FLOATREG_sig(fpregs[reg_num], context)
984
# define FPSCR_sig(context)                        ((double)FLOATREG_sig(fpscr, context))
985
/* Exception Registers access */
986
# define DAR_sig(context)                        EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
987
# define DSISR_sig(context)                        EXCEPREG_sig(dsisr, context)
988
# define TRAP_sig(context)                        EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
989
#endif /* __APPLE__ */
990

    
991
int cpu_signal_handler(int host_signum, void *pinfo,
992
                       void *puc)
993
{
994
    siginfo_t *info = pinfo;
995
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
996
    ucontext_t *uc = puc;
997
#else
998
    struct ucontext *uc = puc;
999
#endif
1000
    unsigned long pc;
1001
    int is_write;
1002

    
1003
    pc = IAR_sig(uc);
1004
    is_write = 0;
1005
#if 0
1006
    /* ppc 4xx case */
1007
    if (DSISR_sig(uc) & 0x00800000)
1008
        is_write = 1;
1009
#else
1010
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
1011
        is_write = 1;
1012
#endif
1013
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1014
                             is_write, &uc->uc_sigmask, puc);
1015
}
1016

    
1017
#elif defined(__alpha__)
1018

    
1019
int cpu_signal_handler(int host_signum, void *pinfo,
1020
                           void *puc)
1021
{
1022
    siginfo_t *info = pinfo;
1023
    struct ucontext *uc = puc;
1024
    uint32_t *pc = uc->uc_mcontext.sc_pc;
1025
    uint32_t insn = *pc;
1026
    int is_write = 0;
1027

    
1028
    /* XXX: need kernel patch to get write flag faster */
1029
    switch (insn >> 26) {
1030
    case 0x0d: // stw
1031
    case 0x0e: // stb
1032
    case 0x0f: // stq_u
1033
    case 0x24: // stf
1034
    case 0x25: // stg
1035
    case 0x26: // sts
1036
    case 0x27: // stt
1037
    case 0x2c: // stl
1038
    case 0x2d: // stq
1039
    case 0x2e: // stl_c
1040
    case 0x2f: // stq_c
1041
        is_write = 1;
1042
    }
1043

    
1044
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1045
                             is_write, &uc->uc_sigmask, puc);
1046
}
1047
#elif defined(__sparc__)
1048

    
1049
int cpu_signal_handler(int host_signum, void *pinfo,
1050
                       void *puc)
1051
{
1052
    siginfo_t *info = pinfo;
1053
    int is_write;
1054
    uint32_t insn;
1055
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
1056
    uint32_t *regs = (uint32_t *)(info + 1);
1057
    void *sigmask = (regs + 20);
1058
    /* XXX: is there a standard glibc define ? */
1059
    unsigned long pc = regs[1];
1060
#else
1061
#ifdef __linux__
1062
    struct sigcontext *sc = puc;
1063
    unsigned long pc = sc->sigc_regs.tpc;
1064
    void *sigmask = (void *)sc->sigc_mask;
1065
#elif defined(__OpenBSD__)
1066
    struct sigcontext *uc = puc;
1067
    unsigned long pc = uc->sc_pc;
1068
    void *sigmask = (void *)(long)uc->sc_mask;
1069
#endif
1070
#endif
1071

    
1072
    /* XXX: need kernel patch to get write flag faster */
1073
    is_write = 0;
1074
    insn = *(uint32_t *)pc;
1075
    if ((insn >> 30) == 3) {
1076
      switch((insn >> 19) & 0x3f) {
1077
      case 0x05: // stb
1078
      case 0x15: // stba
1079
      case 0x06: // sth
1080
      case 0x16: // stha
1081
      case 0x04: // st
1082
      case 0x14: // sta
1083
      case 0x07: // std
1084
      case 0x17: // stda
1085
      case 0x0e: // stx
1086
      case 0x1e: // stxa
1087
      case 0x24: // stf
1088
      case 0x34: // stfa
1089
      case 0x27: // stdf
1090
      case 0x37: // stdfa
1091
      case 0x26: // stqf
1092
      case 0x36: // stqfa
1093
      case 0x25: // stfsr
1094
      case 0x3c: // casa
1095
      case 0x3e: // casxa
1096
        is_write = 1;
1097
        break;
1098
      }
1099
    }
1100
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1101
                             is_write, sigmask, NULL);
1102
}
1103

    
1104
#elif defined(__arm__)
1105

    
1106
int cpu_signal_handler(int host_signum, void *pinfo,
1107
                       void *puc)
1108
{
1109
    siginfo_t *info = pinfo;
1110
    struct ucontext *uc = puc;
1111
    unsigned long pc;
1112
    int is_write;
1113

    
1114
#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
1115
    pc = uc->uc_mcontext.gregs[R15];
1116
#else
1117
    pc = uc->uc_mcontext.arm_pc;
1118
#endif
1119
    /* XXX: compute is_write */
1120
    is_write = 0;
1121
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1122
                             is_write,
1123
                             &uc->uc_sigmask, puc);
1124
}
1125

    
1126
#elif defined(__mc68000)
1127

    
1128
int cpu_signal_handler(int host_signum, void *pinfo,
1129
                       void *puc)
1130
{
1131
    siginfo_t *info = pinfo;
1132
    struct ucontext *uc = puc;
1133
    unsigned long pc;
1134
    int is_write;
1135

    
1136
    pc = uc->uc_mcontext.gregs[16];
1137
    /* XXX: compute is_write */
1138
    is_write = 0;
1139
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1140
                             is_write,
1141
                             &uc->uc_sigmask, puc);
1142
}
1143

    
1144
#elif defined(__ia64)
1145

    
1146
#ifndef __ISR_VALID
1147
  /* This ought to be in <bits/siginfo.h>... */
1148
# define __ISR_VALID        1
1149
#endif
1150

    
1151
int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
1152
{
1153
    siginfo_t *info = pinfo;
1154
    struct ucontext *uc = puc;
1155
    unsigned long ip;
1156
    int is_write = 0;
1157

    
1158
    ip = uc->uc_mcontext.sc_ip;
1159
    switch (host_signum) {
1160
      case SIGILL:
1161
      case SIGFPE:
1162
      case SIGSEGV:
1163
      case SIGBUS:
1164
      case SIGTRAP:
1165
          if (info->si_code && (info->si_segvflags & __ISR_VALID))
1166
              /* ISR.W (write-access) is bit 33:  */
1167
              is_write = (info->si_isr >> 33) & 1;
1168
          break;
1169

    
1170
      default:
1171
          break;
1172
    }
1173
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
1174
                             is_write,
1175
                             (sigset_t *)&uc->uc_sigmask, puc);
1176
}
1177

    
1178
#elif defined(__s390__)
1179

    
1180
int cpu_signal_handler(int host_signum, void *pinfo,
1181
                       void *puc)
1182
{
1183
    siginfo_t *info = pinfo;
1184
    struct ucontext *uc = puc;
1185
    unsigned long pc;
1186
    uint16_t *pinsn;
1187
    int is_write = 0;
1188

    
1189
    pc = uc->uc_mcontext.psw.addr;
1190

    
1191
    /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
1192
       of the normal 2 arguments.  The 3rd argument contains the "int_code"
1193
       from the hardware which does in fact contain the is_write value.
1194
       The rt signal handler, as far as I can tell, does not give this value
1195
       at all.  Not that we could get to it from here even if it were.  */
1196
    /* ??? This is not even close to complete, since it ignores all
1197
       of the read-modify-write instructions.  */
1198
    pinsn = (uint16_t *)pc;
1199
    switch (pinsn[0] >> 8) {
1200
    case 0x50: /* ST */
1201
    case 0x42: /* STC */
1202
    case 0x40: /* STH */
1203
        is_write = 1;
1204
        break;
1205
    case 0xc4: /* RIL format insns */
1206
        switch (pinsn[0] & 0xf) {
1207
        case 0xf: /* STRL */
1208
        case 0xb: /* STGRL */
1209
        case 0x7: /* STHRL */
1210
            is_write = 1;
1211
        }
1212
        break;
1213
    case 0xe3: /* RXY format insns */
1214
        switch (pinsn[2] & 0xff) {
1215
        case 0x50: /* STY */
1216
        case 0x24: /* STG */
1217
        case 0x72: /* STCY */
1218
        case 0x70: /* STHY */
1219
        case 0x8e: /* STPQ */
1220
        case 0x3f: /* STRVH */
1221
        case 0x3e: /* STRV */
1222
        case 0x2f: /* STRVG */
1223
            is_write = 1;
1224
        }
1225
        break;
1226
    }
1227
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1228
                             is_write, &uc->uc_sigmask, puc);
1229
}
1230

    
1231
#elif defined(__mips__)
1232

    
1233
int cpu_signal_handler(int host_signum, void *pinfo,
1234
                       void *puc)
1235
{
1236
    siginfo_t *info = pinfo;
1237
    struct ucontext *uc = puc;
1238
    greg_t pc = uc->uc_mcontext.pc;
1239
    int is_write;
1240

    
1241
    /* XXX: compute is_write */
1242
    is_write = 0;
1243
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1244
                             is_write, &uc->uc_sigmask, puc);
1245
}
1246

    
1247
#elif defined(__hppa__)
1248

    
1249
int cpu_signal_handler(int host_signum, void *pinfo,
1250
                       void *puc)
1251
{
1252
    struct siginfo *info = pinfo;
1253
    struct ucontext *uc = puc;
1254
    unsigned long pc = uc->uc_mcontext.sc_iaoq[0];
1255
    uint32_t insn = *(uint32_t *)pc;
1256
    int is_write = 0;
1257

    
1258
    /* XXX: need kernel patch to get write flag faster.  */
1259
    switch (insn >> 26) {
1260
    case 0x1a: /* STW */
1261
    case 0x19: /* STH */
1262
    case 0x18: /* STB */
1263
    case 0x1b: /* STWM */
1264
        is_write = 1;
1265
        break;
1266

    
1267
    case 0x09: /* CSTWX, FSTWX, FSTWS */
1268
    case 0x0b: /* CSTDX, FSTDX, FSTDS */
1269
        /* Distinguish from coprocessor load ... */
1270
        is_write = (insn >> 9) & 1;
1271
        break;
1272

    
1273
    case 0x03:
1274
        switch ((insn >> 6) & 15) {
1275
        case 0xa: /* STWS */
1276
        case 0x9: /* STHS */
1277
        case 0x8: /* STBS */
1278
        case 0xe: /* STWAS */
1279
        case 0xc: /* STBYS */
1280
            is_write = 1;
1281
        }
1282
        break;
1283
    }
1284

    
1285
    return handle_cpu_signal(pc, (unsigned long)info->si_addr, 
1286
                             is_write, &uc->uc_sigmask, puc);
1287
}
1288

    
1289
#else
1290

    
1291
#error host CPU specific signal handler needed
1292

    
1293
#endif
1294

    
1295
#endif /* !defined(CONFIG_SOFTMMU) */