/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;
static unsigned long next_tb;
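/* next_tb holds the address of the previously executed TB with the index
   of the jump slot that was taken encoded in its low two bits; a value of
   0 means "no chaining", forcing a fresh lookup in the main loop. */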

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
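
/* cpu_loop_exit() unwinds straight back to the setjmp() point inside
   cpu_exec(); regs_to_env() must run first because the globals mirroring
   guest registers live in host registers that the longjmp clobbers. */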

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
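
/* cpu_resume_from_signal() is the companion of the handle_cpu_signal()
   implementations below: once a fault has been repaired, it restores the
   signal mask and re-enters cpu_exec()'s main loop via longjmp. */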

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
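
/* Note: tb_find_slow() hashes on the *physical* PC, so translations are
   shared between all virtual mappings of the same physical code page.  A TB
   whose code crosses a page boundary records both physical pages via
   tb_link_phys(), so invalidating either page discards it. */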

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (U_FLAG | X_FLAG);
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            next_tb = 0;
        }
    }
    return tb;
}
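
/* tb_jmp_cache acts as a direct-mapped, virtual-PC-indexed cache in front
   of the physical hash table: tb_find_fast() only falls back to
   tb_find_slow() when the cached entry misses on pc, cs_base or flags. */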

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
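            /* anything below that calls cpu_loop_exit() (or longjmps on
               env->jmp_env directly) comes back to the setjmp above with a
               non-zero return value and falls through to the else branch */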
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                        && env->hflags & HF_GIF_MASK
#endif
                        && likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                        !(env->hflags & HF_NMI_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                        env->hflags |= HF_NMI_MASK;
                        do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                        !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                        (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                         int intno;
                         /* FIXME: this should respect TPR */
                         env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                         svm_check_intercept(SVM_EXIT_VINTR);
                         intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                         if (loglevel & CPU_LOG_TB_IN_ASM)
                             fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                         do_interrupt(intno, 0, 0, -1, 1);
                         stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                  ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        next_tb = 0;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    spin_lock(&tb_lock);
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    spin_unlock(&tb_lock);
                }
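                /* tb_add_jump() patches jump slot (next_tb & 3) of the
                   previously executed TB so that it branches straight to
                   this block's code on later passes, bypassing the lookup
                   above; two-page TBs are never chained to since either
                   page may be invalidated independently. */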
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                env = cpu_single_env;
#define env cpu_single_env
#endif
                next_tb = tcg_qemu_tb_exec(tc_ptr);
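                /* tcg_qemu_tb_exec() only returns once chaining stops; the
                   value it hands back is the last TB that ran, tagged with
                   the jump-slot index, ready to be patched on the next
                   iteration. */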
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it leads to an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
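
/* Note on the return protocol shared by every handle_cpu_signal() variant
   below: 1 means the fault was resolved (the page was unprotected or the
   guest exception was delivered) and execution can resume; 0 means the
   fault did not come from an emulated guest access, so the caller should
   fall back to the host's default signal behaviour. */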

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
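
/* Trap 0xe is the x86 page fault; bit 1 of the hardware-pushed error code
   is set for a write access, which is what (ERROR_sig(uc) >> 1) & 1
   extracts above.  Non-page-fault traps are reported as reads. */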

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)                REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                 REG_sig(link, context)  /* Link register */
# define CR_sig(context)                 REG_sig(ccr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)              (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                REG_sig(dar, context)
# define DSISR_sig(context)              REG_sig(dsisr, context)
# define TRAP_sig(context)               REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)      /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                           void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch ((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID        1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
      case SIGILL:
      case SIGFPE:
      case SIGSEGV:
      case SIGBUS:
      case SIGTRAP:
          if (info->si_code && (info->si_segvflags & __ISR_VALID))
              /* ISR.W (write-access) is bit 33:  */
              is_write = (info->si_isr >> 33) & 1;
          break;

      default:
          break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */