/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
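
/* Looking up a translated block (TB) is two-level: tb_find_fast() first
   probes the per-CPU direct-mapped cache env->tb_jmp_cache, indexed by
   the virtual PC; on a miss, tb_find_slow() walks the physically indexed
   hash chain tb_phys_hash and, if nothing matches, translates the block.
   A match requires pc, cs_base, flags and the physical page(s) spanned by
   the block, so stale translations are not reused across remappings. */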

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code is available, translate the block now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
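    /* Advance the generation pointer past the emitted code and round it up
       to the next CODE_GEN_ALIGN boundary so the next TB starts aligned.
       Worked example (illustrative, assuming CODE_GEN_ALIGN == 16): a
       pointer of 0x1003 plus 0x25 bytes of code gives 0x1028, which the
       mask rounds up to 0x1030. */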

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->sr & (SR_MD | SR_RB);
    cs_base = 0;         /* XXXXX */
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = 0;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
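    /* Fast path: the jump cache is a direct-mapped table keyed on the
       virtual PC computed above.  A hit is only valid when pc, cs_base and
       flags all match, since one virtual PC can correspond to different
       translations. */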
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            T0 = 0;
        }
    }
    return tb;
}

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    int saved_i7;
    target_ulong tmp_T0;
#endif
    int ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    /* we also save i7 because longjmp may not restore it */
    asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
#endif

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
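    /* DF caches the string-operation direction as a ready-to-add
       increment: +1 when EFLAGS.DF (bit 10) is clear, -1 when set. */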
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
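
    /* Sketch of the control flow below (illustrative only):
     *
     *   for (;;) {                     // outer: re-entered on each longjmp
     *       if (setjmp(env->jmp_env) == 0) {
     *           deliver any pending exception;
     *           for (;;) {             // inner: one iteration per TB
     *               service env->interrupt_request;
     *               tb = tb_find_fast();
     *               chain the previous TB to tb when safe;
     *               run tb's generated code;  // may cpu_loop_exit()
     *           }
     *       }
     *   }
     *
     * cpu_loop_exit() and memory faults longjmp back to the setjmp point,
     * so the outer loop restarts with the cause recorded in
     * env->exception_index. */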

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            T0 = 0; /* force lookup of first TB */
            for(;;) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                /* g1 can be modified by some libc? functions */
                tmp_T0 = T0;
#endif
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                        && env->hflags & HF_GIF_MASK
#endif
                                ) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                        !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                        (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                            tmp_T0 = 0;
#else
                            T0 = 0;
#endif
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                    }
#elif defined(TARGET_SH4)
                    /* XXXXX */
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
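                /* An external exit request is turned into EXCP_INTERRUPT
                   here; cpu_loop_exit() longjmps back to the setjmp above,
                   where exception_index >= EXCP_INTERRUPT makes cpu_exec()
                   return to its caller. */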
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                T0 = tmp_T0;
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (T0 != 0 &&
#ifdef USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    spin_lock(&tb_lock);
                    tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
                    spin_unlock(&tb_lock);
                }
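                /* The masking above relies on T0 carrying the address of
                   the TB that just exited ORed with the index (0 or 1) of
                   the jump slot it left through in the low two bits. */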
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call        %0\n\t"
                                     "mov        %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "o0", "o1", "o2", "o3", "o4", "o5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(__ia64)
                struct fptr {
                        void *ip;
                        void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
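                /* On ia64 a function pointer designates a descriptor
                   holding the entry address (ip) and the global pointer
                   (gp), so calling through &fp enters tc_ptr with gp
                   pointing into the code buffer. */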
#else
                gen_func();
#endif
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
#endif
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot be enabled yet: it triggers an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave((target_ulong)ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor((target_ulong)ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
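
/* All handle_cpu_signal() variants share one return protocol: 0 means the
   fault did not come from guest memory access (the caller delivers the host
   signal normally); 1 means it was handled, either by unprotecting a page
   holding translated code or by raising a guest exception, in which case
   control may leave through longjmp instead of returning. */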

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
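
/* Trap number 0xe is the x86 page fault; bit 1 of its error code is set
   when the faulting access was a write, which is what the expression
   (ERROR_sig(uc) >> 1) & 1 extracts for handle_cpu_signal(). */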

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)   ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)    REG_sig(gpr[reg_num], context)
# define IAR_sig(context)             REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)             REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)             REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)             REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)              REG_sig(link, context)  /* Link register */
# define CR_sig(context)              REG_sig(ccr, context)   /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)  (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)           (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)             REG_sig(dar, context)
# define DSISR_sig(context)           REG_sig(dsisr, context)
# define TRAP_sig(context)            REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)  ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)  ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)    ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)        REG_sig(r##reg_num, context)
# define IAR_sig(context)                 REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                 REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                 REG_sig(ctr, context)
# define XER_sig(context)                 REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                  REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                  REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)      FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)               ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                 EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)               EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
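/* insn >> 26 isolates the Alpha major opcode (bits 31:26); the cases above
   are the integer and floating-point store opcodes, so a fault on any of
   them is known to be a write access. */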

#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
      switch((insn >> 19) & 0x3f) {
      case 0x05: // stb
      case 0x06: // sth
      case 0x04: // st
      case 0x07: // std
      case 0x24: // stf
      case 0x27: // stdf
      case 0x25: // stfsr
        is_write = 1;
        break;
      }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID        1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
      case SIGILL:
      case SIGFPE:
      case SIGSEGV:
      case SIGBUS:
      case SIGTRAP:
          if (info->si_code && (info->si_segvflags & __ISR_VALID))
              /* ISR.W (write-access) is bit 33:  */
              is_write = (info->si_isr >> 33) & 1;
          break;

      default:
          break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */