/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
20
#include "config.h"
#include "exec.h"
#include "disas.h"

#if !defined(CONFIG_SOFTMMU)
/* In user-mode emulation the x86 register macros from exec.h collide with
   the ucontext register names, so drop them before pulling in signal
   headers. */
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

/* Set by tb_find_slow() when the TB cache had to be flushed; tells the
   execution loop that cached jump-chain state may be stale. */
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

/* By default, saving/restoring the global register variables around
   library calls is a no-op; the sparc/glibc workaround below overrides
   these. */
#define SAVE_GLOBALS()
#define RESTORE_GLOBALS()

#if defined(__sparc__) && !defined(HOST_SOLARIS)
#include <features.h>
#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
                           ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
// Work around ugly bugs in glibc that mangle global register contents

static volatile void *saved_env;
static volatile unsigned long saved_t0, saved_i7;
#undef SAVE_GLOBALS
#define SAVE_GLOBALS() do {                                     \
        saved_env = env;                                        \
        saved_t0 = T0;                                          \
        asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7));     \
    } while(0)

#undef RESTORE_GLOBALS
#define RESTORE_GLOBALS() do {                                  \
        env = (void *)saved_env;                                \
        T0 = saved_t0;                                          \
        asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7));     \
    } while(0)

/* Wrapper around setjmp() that preserves the global register variables
   (env, T0, %i7) across the glibc call. */
static int sparc_setjmp(jmp_buf buf)
{
    int ret;

    SAVE_GLOBALS();
    ret = setjmp(buf);
    RESTORE_GLOBALS();
    return ret;
}
#undef setjmp
#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)

/* Wrapper around longjmp() that saves the globals before the non-local
   jump; sparc_setjmp() restores them on the way back in. */
static void sparc_longjmp(jmp_buf buf, int val)
{
    SAVE_GLOBALS();
    longjmp(buf, val);
}
#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
#endif
#endif
88

    
89
void cpu_loop_exit(void)
90
{
91
    /* NOTE: the register at this point must be saved by hand because
92
       longjmp restore them */
93
    regs_to_env();
94
    longjmp(env->jmp_env, 1);
95
}
96

    
97
/* Targets other than sparc/sh4/m68k keep T2 in a host register. */
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif
100

    
101
/* exit the current TB from a signal handler. The host registers are
102
   restored in a state compatible with the CPU emulator
103
 */
104
void cpu_resume_from_signal(CPUState *env1, void *puc)
105
{
106
#if !defined(CONFIG_SOFTMMU)
107
    struct ucontext *uc = puc;
108
#endif
109

    
110
    env = env1;
111

    
112
    /* XXX: restore cpu registers saved in host registers */
113

    
114
#if !defined(CONFIG_SOFTMMU)
115
    if (puc) {
116
        /* XXX: use siglongjmp ? */
117
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
118
    }
119
#endif
120
    longjmp(env->jmp_env, 1);
121
}
122

    
123
/* Look up the translated block for (pc, cs_base, flags) in the physical
   hash table, translating the guest code on a miss.  Returns the TB and
   records it in the virtual-pc jump cache.  Sets tb_invalidated_flag when
   a TB-cache flush occurred. */
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    SAVE_GLOBALS();
    cpu_gen_code(env, tb, &code_gen_size);
    RESTORE_GLOBALS();
    /* bump the code buffer pointer, keeping it CODE_GEN_ALIGN-aligned */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
200

    
201
static inline TranslationBlock *tb_find_fast(void)
202
{
203
    TranslationBlock *tb;
204
    target_ulong cs_base, pc;
205
    uint64_t flags;
206

    
207
    /* we record a subset of the CPU state. It will
208
       always be the same before a given translated block
209
       is executed. */
210
#if defined(TARGET_I386)
211
    flags = env->hflags;
212
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
213
    flags |= env->intercept;
214
    cs_base = env->segs[R_CS].base;
215
    pc = cs_base + env->eip;
216
#elif defined(TARGET_ARM)
217
    flags = env->thumb | (env->vfp.vec_len << 1)
218
            | (env->vfp.vec_stride << 4);
219
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
220
        flags |= (1 << 6);
221
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
222
        flags |= (1 << 7);
223
    flags |= (env->condexec_bits << 8);
224
    cs_base = 0;
225
    pc = env->regs[15];
226
#elif defined(TARGET_SPARC)
227
#ifdef TARGET_SPARC64
228
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
229
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
230
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
231
#else
232
    // FPU enable . Supervisor
233
    flags = (env->psref << 4) | env->psrs;
234
#endif
235
    cs_base = env->npc;
236
    pc = env->pc;
237
#elif defined(TARGET_PPC)
238
    flags = env->hflags;
239
    cs_base = 0;
240
    pc = env->nip;
241
#elif defined(TARGET_MIPS)
242
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
243
    cs_base = 0;
244
    pc = env->PC[env->current_tc];
245
#elif defined(TARGET_M68K)
246
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
247
            | (env->sr & SR_S)            /* Bit  13 */
248
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
249
    cs_base = 0;
250
    pc = env->pc;
251
#elif defined(TARGET_SH4)
252
    flags = env->flags;
253
    cs_base = 0;
254
    pc = env->pc;
255
#elif defined(TARGET_ALPHA)
256
    flags = env->ps;
257
    cs_base = 0;
258
    pc = env->pc;
259
#elif defined(TARGET_CRIS)
260
    flags = 0;
261
    cs_base = 0;
262
    pc = env->pc;
263
#else
264
#error unsupported CPU
265
#endif
266
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
267
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
268
                         tb->flags != flags, 0)) {
269
        tb = tb_find_slow(pc, cs_base, flags);
270
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
271
           doing it in tb_find_slow */
272
        if (tb_invalidated_flag) {
273
            /* as some TB could have been invalidated because
274
               of memory exceptions while generating the code, we
275
               must recompute the hash index here */
276
            T0 = 0;
277
        }
278
    }
279
    return tb;
280
}
281

    
282
#define BREAK_CHAIN T0 = 0
283

    
284
/* main execution loop */
285

    
286
int cpu_exec(CPUState *env1)
287
{
288
#define DECLARE_HOST_REGS 1
289
#include "hostregs_helper.h"
290
#if defined(TARGET_SPARC)
291
#if defined(reg_REGWPTR)
292
    uint32_t *saved_regwptr;
293
#endif
294
#endif
295
    int ret, interrupt_request;
296
    long (*gen_func)(void);
297
    TranslationBlock *tb;
298
    uint8_t *tc_ptr;
299

    
300
    if (cpu_halted(env1) == EXCP_HALTED)
301
        return EXCP_HALTED;
302

    
303
    cpu_single_env = env1;
304

    
305
    /* first we save global registers */
306
#define SAVE_HOST_REGS 1
307
#include "hostregs_helper.h"
308
    env = env1;
309
    SAVE_GLOBALS();
310

    
311
    env_to_regs();
312
#if defined(TARGET_I386)
313
    /* put eflags in CPU temporary format */
314
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
315
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
316
    CC_OP = CC_OP_EFLAGS;
317
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
318
#elif defined(TARGET_SPARC)
319
#if defined(reg_REGWPTR)
320
    saved_regwptr = REGWPTR;
321
#endif
322
#elif defined(TARGET_M68K)
323
    env->cc_op = CC_OP_FLAGS;
324
    env->cc_dest = env->sr & 0xf;
325
    env->cc_x = (env->sr >> 4) & 1;
326
#elif defined(TARGET_ALPHA)
327
#elif defined(TARGET_ARM)
328
#elif defined(TARGET_PPC)
329
#elif defined(TARGET_MIPS)
330
#elif defined(TARGET_SH4)
331
#elif defined(TARGET_CRIS)
332
    /* XXXXX */
333
#else
334
#error unsupported target CPU
335
#endif
336
    env->exception_index = -1;
337

    
338
    /* prepare setjmp context for exception handling */
339
    for(;;) {
340
        if (setjmp(env->jmp_env) == 0) {
341
            env->current_tb = NULL;
342
            /* if an exception is pending, we execute it here */
343
            if (env->exception_index >= 0) {
344
                if (env->exception_index >= EXCP_INTERRUPT) {
345
                    /* exit request from the cpu execution loop */
346
                    ret = env->exception_index;
347
                    break;
348
                } else if (env->user_mode_only) {
349
                    /* if user mode only, we simulate a fake exception
350
                       which will be handled outside the cpu execution
351
                       loop */
352
#if defined(TARGET_I386)
353
                    do_interrupt_user(env->exception_index,
354
                                      env->exception_is_int,
355
                                      env->error_code,
356
                                      env->exception_next_eip);
357
#endif
358
                    ret = env->exception_index;
359
                    break;
360
                } else {
361
#if defined(TARGET_I386)
362
                    /* simulate a real cpu exception. On i386, it can
363
                       trigger new exceptions, but we do not handle
364
                       double or triple faults yet. */
365
                    do_interrupt(env->exception_index,
366
                                 env->exception_is_int,
367
                                 env->error_code,
368
                                 env->exception_next_eip, 0);
369
                    /* successfully delivered */
370
                    env->old_exception = -1;
371
#elif defined(TARGET_PPC)
372
                    do_interrupt(env);
373
#elif defined(TARGET_MIPS)
374
                    do_interrupt(env);
375
#elif defined(TARGET_SPARC)
376
                    do_interrupt(env->exception_index);
377
#elif defined(TARGET_ARM)
378
                    do_interrupt(env);
379
#elif defined(TARGET_SH4)
380
                    do_interrupt(env);
381
#elif defined(TARGET_ALPHA)
382
                    do_interrupt(env);
383
#elif defined(TARGET_CRIS)
384
                    do_interrupt(env);
385
#elif defined(TARGET_M68K)
386
                    do_interrupt(0);
387
#endif
388
                }
389
                env->exception_index = -1;
390
            }
391
#ifdef USE_KQEMU
392
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
393
                int ret;
394
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
395
                ret = kqemu_cpu_exec(env);
396
                /* put eflags in CPU temporary format */
397
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
398
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
399
                CC_OP = CC_OP_EFLAGS;
400
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
401
                if (ret == 1) {
402
                    /* exception */
403
                    longjmp(env->jmp_env, 1);
404
                } else if (ret == 2) {
405
                    /* softmmu execution needed */
406
                } else {
407
                    if (env->interrupt_request != 0) {
408
                        /* hardware interrupt will be executed just after */
409
                    } else {
410
                        /* otherwise, we restart */
411
                        longjmp(env->jmp_env, 1);
412
                    }
413
                }
414
            }
415
#endif
416

    
417
            T0 = 0; /* force lookup of first TB */
418
            for(;;) {
419
                SAVE_GLOBALS();
420
                interrupt_request = env->interrupt_request;
421
                if (__builtin_expect(interrupt_request, 0)
422
#if defined(TARGET_I386)
423
                        && env->hflags & HF_GIF_MASK
424
#endif
425
                                ) {
426
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
427
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
428
                        env->exception_index = EXCP_DEBUG;
429
                        cpu_loop_exit();
430
                    }
431
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
432
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
433
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
434
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
435
                        env->halted = 1;
436
                        env->exception_index = EXCP_HLT;
437
                        cpu_loop_exit();
438
                    }
439
#endif
440
#if defined(TARGET_I386)
441
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
442
                        !(env->hflags & HF_SMM_MASK)) {
443
                        svm_check_intercept(SVM_EXIT_SMI);
444
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
445
                        do_smm_enter();
446
                        BREAK_CHAIN;
447
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
448
                        (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
449
                        !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
450
                        int intno;
451
                        svm_check_intercept(SVM_EXIT_INTR);
452
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
453
                        intno = cpu_get_pic_interrupt(env);
454
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
455
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
456
                        }
457
                        do_interrupt(intno, 0, 0, 0, 1);
458
                        /* ensure that no TB jump will be modified as
459
                           the program flow was changed */
460
                        BREAK_CHAIN;
461
#if !defined(CONFIG_USER_ONLY)
462
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
463
                        (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
464
                         int intno;
465
                         /* FIXME: this should respect TPR */
466
                         env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
467
                         svm_check_intercept(SVM_EXIT_VINTR);
468
                         intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
469
                         if (loglevel & CPU_LOG_TB_IN_ASM)
470
                             fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
471
                         do_interrupt(intno, 0, 0, -1, 1);
472
                         stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
473
                                  ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
474
                        BREAK_CHAIN;
475
#endif
476
                    }
477
#elif defined(TARGET_PPC)
478
#if 0
479
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
480
                        cpu_ppc_reset(env);
481
                    }
482
#endif
483
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
484
                        ppc_hw_interrupt(env);
485
                        if (env->pending_interrupts == 0)
486
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
487
                        BREAK_CHAIN;
488
                    }
489
#elif defined(TARGET_MIPS)
490
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
491
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
492
                        (env->CP0_Status & (1 << CP0St_IE)) &&
493
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
494
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
495
                        !(env->hflags & MIPS_HFLAG_DM)) {
496
                        /* Raise it */
497
                        env->exception_index = EXCP_EXT_INTERRUPT;
498
                        env->error_code = 0;
499
                        do_interrupt(env);
500
                        BREAK_CHAIN;
501
                    }
502
#elif defined(TARGET_SPARC)
503
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
504
                        (env->psret != 0)) {
505
                        int pil = env->interrupt_index & 15;
506
                        int type = env->interrupt_index & 0xf0;
507

    
508
                        if (((type == TT_EXTINT) &&
509
                             (pil == 15 || pil > env->psrpil)) ||
510
                            type != TT_EXTINT) {
511
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
512
                            do_interrupt(env->interrupt_index);
513
                            env->interrupt_index = 0;
514
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
515
                            cpu_check_irqs(env);
516
#endif
517
                        BREAK_CHAIN;
518
                        }
519
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
520
                        //do_interrupt(0, 0, 0, 0, 0);
521
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
522
                    }
523
#elif defined(TARGET_ARM)
524
                    if (interrupt_request & CPU_INTERRUPT_FIQ
525
                        && !(env->uncached_cpsr & CPSR_F)) {
526
                        env->exception_index = EXCP_FIQ;
527
                        do_interrupt(env);
528
                        BREAK_CHAIN;
529
                    }
530
                    /* ARMv7-M interrupt return works by loading a magic value
531
                       into the PC.  On real hardware the load causes the
532
                       return to occur.  The qemu implementation performs the
533
                       jump normally, then does the exception return when the
534
                       CPU tries to execute code at the magic address.
535
                       This will cause the magic PC value to be pushed to
536
                       the stack if an interrupt occured at the wrong time.
537
                       We avoid this by disabling interrupts when
538
                       pc contains a magic address.  */
539
                    if (interrupt_request & CPU_INTERRUPT_HARD
540
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
541
                            || !(env->uncached_cpsr & CPSR_I))) {
542
                        env->exception_index = EXCP_IRQ;
543
                        do_interrupt(env);
544
                        BREAK_CHAIN;
545
                    }
546
#elif defined(TARGET_SH4)
547
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
548
                        do_interrupt(env);
549
                        BREAK_CHAIN;
550
                    }
551
#elif defined(TARGET_ALPHA)
552
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
553
                        do_interrupt(env);
554
                        BREAK_CHAIN;
555
                    }
556
#elif defined(TARGET_CRIS)
557
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
558
                        do_interrupt(env);
559
                        BREAK_CHAIN;
560
                    }
561
#elif defined(TARGET_M68K)
562
                    if (interrupt_request & CPU_INTERRUPT_HARD
563
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
564
                            < env->pending_level) {
565
                        /* Real hardware gets the interrupt vector via an
566
                           IACK cycle at this point.  Current emulated
567
                           hardware doesn't rely on this, so we
568
                           provide/save the vector when the interrupt is
569
                           first signalled.  */
570
                        env->exception_index = env->pending_vector;
571
                        do_interrupt(1);
572
                        BREAK_CHAIN;
573
                    }
574
#endif
575
                   /* Don't use the cached interupt_request value,
576
                      do_interrupt may have updated the EXITTB flag. */
577
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
578
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
579
                        /* ensure that no TB jump will be modified as
580
                           the program flow was changed */
581
                        BREAK_CHAIN;
582
                    }
583
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
584
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
585
                        env->exception_index = EXCP_INTERRUPT;
586
                        cpu_loop_exit();
587
                    }
588
                }
589
#ifdef DEBUG_EXEC
590
                if ((loglevel & CPU_LOG_TB_CPU)) {
591
                    /* restore flags in standard format */
592
                    regs_to_env();
593
#if defined(TARGET_I386)
594
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
595
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
596
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
597
#elif defined(TARGET_ARM)
598
                    cpu_dump_state(env, logfile, fprintf, 0);
599
#elif defined(TARGET_SPARC)
600
                    REGWPTR = env->regbase + (env->cwp * 16);
601
                    env->regwptr = REGWPTR;
602
                    cpu_dump_state(env, logfile, fprintf, 0);
603
#elif defined(TARGET_PPC)
604
                    cpu_dump_state(env, logfile, fprintf, 0);
605
#elif defined(TARGET_M68K)
606
                    cpu_m68k_flush_flags(env, env->cc_op);
607
                    env->cc_op = CC_OP_FLAGS;
608
                    env->sr = (env->sr & 0xffe0)
609
                              | env->cc_dest | (env->cc_x << 4);
610
                    cpu_dump_state(env, logfile, fprintf, 0);
611
#elif defined(TARGET_MIPS)
612
                    cpu_dump_state(env, logfile, fprintf, 0);
613
#elif defined(TARGET_SH4)
614
                    cpu_dump_state(env, logfile, fprintf, 0);
615
#elif defined(TARGET_ALPHA)
616
                    cpu_dump_state(env, logfile, fprintf, 0);
617
#elif defined(TARGET_CRIS)
618
                    cpu_dump_state(env, logfile, fprintf, 0);
619
#else
620
#error unsupported target CPU
621
#endif
622
                }
623
#endif
624
                tb = tb_find_fast();
625
#ifdef DEBUG_EXEC
626
                if ((loglevel & CPU_LOG_EXEC)) {
627
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
628
                            (long)tb->tc_ptr, tb->pc,
629
                            lookup_symbol(tb->pc));
630
                }
631
#endif
632
                RESTORE_GLOBALS();
633
                /* see if we can patch the calling TB. When the TB
634
                   spans two pages, we cannot safely do a direct
635
                   jump. */
636
                {
637
                    if (T0 != 0 &&
638
#if USE_KQEMU
639
                        (env->kqemu_enabled != 2) &&
640
#endif
641
                        tb->page_addr[1] == -1) {
642
                    spin_lock(&tb_lock);
643
                    tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
644
                    spin_unlock(&tb_lock);
645
                }
646
                }
647
                tc_ptr = tb->tc_ptr;
648
                env->current_tb = tb;
649
                /* execute the generated code */
650
                gen_func = (void *)tc_ptr;
651
#if defined(__sparc__)
652
                __asm__ __volatile__("call        %0\n\t"
653
                                     "mov        %%o7,%%i0"
654
                                     : /* no outputs */
655
                                     : "r" (gen_func)
656
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
657
                                       "o0", "o1", "o2", "o3", "o4", "o5",
658
                                       "l0", "l1", "l2", "l3", "l4", "l5",
659
                                       "l6", "l7");
660
#elif defined(__hppa__)
661
                asm volatile ("ble  0(%%sr4,%1)\n"
662
                              "copy %%r31,%%r18\n"
663
                              "copy %%r28,%0\n"
664
                              : "=r" (T0)
665
                              : "r" (gen_func)
666
                              : "r1", "r2", "r3", "r4", "r5", "r6", "r7",
667
                                "r8", "r9", "r10", "r11", "r12", "r13",
668
                                "r18", "r19", "r20", "r21", "r22", "r23",
669
                                "r24", "r25", "r26", "r27", "r28", "r29",
670
                                "r30", "r31");
671
#elif defined(__arm__)
672
                asm volatile ("mov pc, %0\n\t"
673
                              ".global exec_loop\n\t"
674
                              "exec_loop:\n\t"
675
                              : /* no outputs */
676
                              : "r" (gen_func)
677
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
678
#elif defined(__ia64)
679
                struct fptr {
680
                        void *ip;
681
                        void *gp;
682
                } fp;
683

    
684
                fp.ip = tc_ptr;
685
                fp.gp = code_gen_buffer + 2 * (1 << 20);
686
                (*(void (*)(void)) &fp)();
687
#else
688
                T0 = gen_func();
689
#endif
690
                env->current_tb = NULL;
691
                /* reset soft MMU for next block (it can currently
692
                   only be set by a memory fault) */
693
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
694
                if (env->hflags & HF_SOFTMMU_MASK) {
695
                    env->hflags &= ~HF_SOFTMMU_MASK;
696
                    /* do not allow linking to another block */
697
                    T0 = 0;
698
                }
699
#endif
700
#if defined(USE_KQEMU)
701
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
702
                if (kqemu_is_ok(env) &&
703
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
704
                    cpu_loop_exit();
705
                }
706
#endif
707
            } /* for(;;) */
708
        } else {
709
            env_to_regs();
710
        }
711
    } /* for(;;) */
712

    
713

    
714
#if defined(TARGET_I386)
715
    /* restore flags in standard format */
716
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
717
#elif defined(TARGET_ARM)
718
    /* XXX: Save/restore host fpu exception state?.  */
719
#elif defined(TARGET_SPARC)
720
#if defined(reg_REGWPTR)
721
    REGWPTR = saved_regwptr;
722
#endif
723
#elif defined(TARGET_PPC)
724
#elif defined(TARGET_M68K)
725
    cpu_m68k_flush_flags(env, env->cc_op);
726
    env->cc_op = CC_OP_FLAGS;
727
    env->sr = (env->sr & 0xffe0)
728
              | env->cc_dest | (env->cc_x << 4);
729
#elif defined(TARGET_MIPS)
730
#elif defined(TARGET_SH4)
731
#elif defined(TARGET_ALPHA)
732
#elif defined(TARGET_CRIS)
733
    /* XXXXX */
734
#else
735
#error unsupported target CPU
736
#endif
737

    
738
    /* restore global registers */
739
    RESTORE_GLOBALS();
740
#include "hostregs_helper.h"
741

    
742
    /* fail safe : never use cpu_single_env outside cpu_exec() */
743
    cpu_single_env = NULL;
744
    return ret;
745
}
746

    
747
/* must only be called from the generated code as an exception can be
748
   generated */
749
void tb_invalidate_page_range(target_ulong start, target_ulong end)
750
{
751
    /* XXX: cannot enable it yet because it yields to MMU exception
752
       where NIP != read address on PowerPC */
753
#if 0
754
    target_ulong phys_addr;
755
    phys_addr = get_phys_addr_code(env, start);
756
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
757
#endif
758
}
759

    
760
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

/* Load a segment register for user-mode emulation.  In real/VM86 mode the
   descriptor cache is filled directly from the selector; in protected mode
   the full load_seg() path (descriptor table access) is used. */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    /* the helpers operate on the global env, so swap it in temporarily */
    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}
777

    
778
/* Save the FPU state of CPU 's' to guest memory at 'ptr' (FSAVE entry
   point for user-only emulation); 'data32' selects the 32-bit layout. */
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *prev_env = env;

    env = s;
    helper_fsave(ptr, data32);
    env = prev_env;
}
789

    
790
/* Restore the FPU state of CPU 's' from guest memory at 'ptr' (FRSTOR
   entry point for user-only emulation); 'data32' selects the 32-bit
   layout. */
void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *prev_env = env;

    env = s;
    helper_frstor(ptr, data32);
    env = prev_env;
}
801

    
802
#endif /* TARGET_I386 */
803

    
804
#if !defined(CONFIG_SOFTMMU)
805

    
806
#if defined(TARGET_I386)
807

    
808
/* 'pc' is the host PC at which the exception was raised. 'address' is
809
   the effective address of the memory exception. 'is_write' is 1 if a
810
   write caused the exception and otherwise 0'. 'old_set' is the
811
   signal set which should be restored */
812
/* TARGET_I386 variant.  Returns 0 when the fault is not a guest MMU
   fault (the caller should deliver the signal normally) and 1 when it
   was handled; note that the raise_exception_err() and
   cpu_resume_from_signal() paths below do not return. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    /* A write into a page protected for self-modifying-code detection:
       if page_unprotect() succeeds the faulting access can simply be
       retried.  Note the host address is translated with h2g(). */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
860

    
861
#elif defined(TARGET_ARM)
862
/* TARGET_ARM variant; mirrors the TARGET_I386 handler above.
   Returns 0 when the fault is not a guest MMU fault and 1 when it was
   handled; cpu_loop_exit() does not return. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
899
#elif defined(TARGET_SPARC)
900
/* TARGET_SPARC variant; mirrors the TARGET_ARM handler above.
   Returns 0 when the fault is not a guest MMU fault and 1 when it was
   handled; cpu_loop_exit() does not return. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
937
#elif defined (TARGET_PPC)
938
/* TARGET_PPC variant.  Returns 0 when the fault is not a guest MMU
   fault and 1 when it was handled; the do_raise_exception_err() and
   cpu_resume_from_signal() paths do not return. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
986

    
987
#elif defined(TARGET_M68K)
988
/* TARGET_M68K variant; mirrors the TARGET_ARM handler above.
   Returns 0 when the fault is not a guest MMU fault and 1 when it was
   handled; cpu_loop_exit() does not return. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    /* Fix: every other target translates the faulting host address to a
       guest address with h2g() before calling page_unprotect(); this
       variant passed the raw host address, so write faults on protected
       code pages could not be matched to the guest page. */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
1025

    
1026
#elif defined (TARGET_MIPS)
1027
/* TARGET_MIPS variant; mirrors the TARGET_PPC handler above.
   Returns 0 when the fault is not a guest MMU fault and 1 when it was
   handled; the do_raise_exception_err() and cpu_resume_from_signal()
   paths do not return. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
1075

    
1076
#elif defined (TARGET_SH4)
1077
/* TARGET_SH4 variant; mirrors the TARGET_ARM handler above.
   Returns 0 when the fault is not a guest MMU fault and 1 when it was
   handled; cpu_loop_exit() does not return. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
/* NOTE(review): this disabled debug snippet was copied from the PPC
   variant; env->nip does not exist on SH4 — fix before enabling. */
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
1120

    
1121
#elif defined (TARGET_ALPHA)
1122
/* TARGET_ALPHA variant; mirrors the TARGET_ARM handler above.
   Returns 0 when the fault is not a guest MMU fault and 1 when it was
   handled; cpu_loop_exit() does not return. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
/* NOTE(review): this disabled debug snippet was copied from the PPC
   variant; env->nip does not exist on Alpha — fix before enabling. */
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
1165
#elif defined (TARGET_CRIS)
1166
/* TARGET_CRIS variant; mirrors the TARGET_ARM handler above.
   Returns 0 when the fault is not a guest MMU fault and 1 when it was
   handled; cpu_loop_exit() does not return. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
1205

    
1206
#else
1207
#error unsupported target CPU
1208
#endif
1209

    
1210
#if defined(__i386__)
1211

    
1212
#if defined(__APPLE__)
1213
# include <sys/ucontext.h>
1214

    
1215
# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1216
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
1217
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
1218
#else
1219
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
1220
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
1221
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
1222
#endif
1223

    
1224
/* Host-side (__i386__) SIGSEGV entry: extract the faulting PC and the
   write flag from the signal context and forward to
   handle_cpu_signal().  Returns its result (0 = deliver the signal
   normally, 1 = handled). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    /* trap 0xe is the x86 page-fault vector; bit 1 of the page-fault
       error code is set for write accesses (Intel SDM).  For any other
       trap the write flag is unknown and reported as 0. */
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
1245

    
1246
#elif defined(__x86_64__)
1247

    
1248
/* Host-side (__x86_64__) SIGSEGV entry; same logic as the __i386__
   version: trap 0xe is the page-fault vector and bit 1 of the error
   code indicates a write access (Intel SDM). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
1261

    
1262
#elif defined(__powerpc__)
1263

    
1264
/***********************************************************************
1265
 * signal context platform-specific definitions
1266
 * From Wine
1267
 */
1268
#ifdef linux
1269
/* All Registers access - only for local access */
1270
# define REG_sig(reg_name, context)                ((context)->uc_mcontext.regs->reg_name)
1271
/* Gpr Registers access  */
1272
# define GPR_sig(reg_num, context)                REG_sig(gpr[reg_num], context)
1273
# define IAR_sig(context)                        REG_sig(nip, context)        /* Program counter */
1274
# define MSR_sig(context)                        REG_sig(msr, context)   /* Machine State Register (Supervisor) */
1275
# define CTR_sig(context)                        REG_sig(ctr, context)   /* Count register */
1276
# define XER_sig(context)                        REG_sig(xer, context) /* User's integer exception register */
1277
# define LR_sig(context)                        REG_sig(link, context) /* Link register */
1278
# define CR_sig(context)                        REG_sig(ccr, context) /* Condition register */
1279
/* Float Registers access  */
1280
# define FLOAT_sig(reg_num, context)                (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1281
# define FPSCR_sig(context)                        (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1282
/* Exception Registers access */
1283
# define DAR_sig(context)                        REG_sig(dar, context)
1284
# define DSISR_sig(context)                        REG_sig(dsisr, context)
1285
# define TRAP_sig(context)                        REG_sig(trap, context)
1286
#endif /* linux */
1287

    
1288
#ifdef __APPLE__
1289
# include <sys/ucontext.h>
1290
typedef struct ucontext SIGCONTEXT;
1291
/* All Registers access - only for local access */
1292
# define REG_sig(reg_name, context)                ((context)->uc_mcontext->ss.reg_name)
1293
# define FLOATREG_sig(reg_name, context)        ((context)->uc_mcontext->fs.reg_name)
1294
# define EXCEPREG_sig(reg_name, context)        ((context)->uc_mcontext->es.reg_name)
1295
# define VECREG_sig(reg_name, context)                ((context)->uc_mcontext->vs.reg_name)
1296
/* Gpr Registers access */
1297
# define GPR_sig(reg_num, context)                REG_sig(r##reg_num, context)
1298
# define IAR_sig(context)                        REG_sig(srr0, context)        /* Program counter */
1299
# define MSR_sig(context)                        REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
1300
# define CTR_sig(context)                        REG_sig(ctr, context)
1301
# define XER_sig(context)                        REG_sig(xer, context) /* Link register */
1302
# define LR_sig(context)                        REG_sig(lr, context)  /* User's integer exception register */
1303
# define CR_sig(context)                        REG_sig(cr, context)  /* Condition register */
1304
/* Float Registers access */
1305
# define FLOAT_sig(reg_num, context)                FLOATREG_sig(fpregs[reg_num], context)
1306
# define FPSCR_sig(context)                        ((double)FLOATREG_sig(fpscr, context))
1307
/* Exception Registers access */
1308
# define DAR_sig(context)                        EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
1309
# define DSISR_sig(context)                        EXCEPREG_sig(dsisr, context)
1310
# define TRAP_sig(context)                        EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1311
#endif /* __APPLE__ */
1312

    
1313
/* Host-side (__powerpc__) SIGSEGV entry: the faulting PC comes from
   the saved instruction address register; the write flag is derived
   from DSISR bit 0x02000000 (store access), except for trap 0x400
   (instruction storage interrupt) where DSISR is not meaningful. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
1334

    
1335
#elif defined(__alpha__)
1336

    
1337
/* Host-side (__alpha__) SIGSEGV entry.  The Alpha signal context does
   not report whether the access was a write, so the faulting
   instruction is fetched and its opcode (top 6 bits) is checked
   against the store-instruction encodings. */
int cpu_signal_handler(int host_signum, void *pinfo,
                           void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
1365
#elif defined(__sparc__)
1366

    
1367
/* Host-side (__sparc__) SIGSEGV entry.  The register window and the
   signal mask are located directly behind the siginfo structure; like
   on Alpha, the faulting instruction is decoded to detect stores
   (format-3 instructions, op3 field in bits 24..19). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
      switch((insn >> 19) & 0x3f) {
      case 0x05: // stb
      case 0x06: // sth
      case 0x04: // st
      case 0x07: // std
      case 0x24: // stf
      case 0x27: // stdf
      case 0x25: // stfsr
        is_write = 1;
        break;
      }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
1398

    
1399
#elif defined(__arm__)
1400

    
1401
/* Host-side (__arm__) SIGSEGV entry: PC is taken from R15 in the
   signal context; the write flag is not recoverable here and is
   reported as 0. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
1416

    
1417
#elif defined(__mc68000)
1418

    
1419
/* Host-side (__mc68000) SIGSEGV entry: PC is general register 16 in
   the signal context; the write flag is not recoverable here and is
   reported as 0. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
1434

    
1435
#elif defined(__ia64)
1436

    
1437
#ifndef __ISR_VALID
1438
  /* This ought to be in <bits/siginfo.h>... */
1439
# define __ISR_VALID        1
1440
#endif
1441

    
1442
/* Host-side (__ia64) fault entry: handles several fault signals.  The
   write flag is read from the Interruption Status Register snapshot in
   siginfo when the kernel marked it valid (__ISR_VALID). */
int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
      case SIGILL:
      case SIGFPE:
      case SIGSEGV:
      case SIGBUS:
      case SIGTRAP:
          if (info->si_code && (info->si_segvflags & __ISR_VALID))
              /* ISR.W (write-access) is bit 33:  */
              is_write = (info->si_isr >> 33) & 1;
          break;

      default:
          break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
1468

    
1469
#elif defined(__s390__)
1470

    
1471
/* Host-side (__s390__) SIGSEGV entry: PC comes from the PSW address in
   the signal context; the write flag is not recoverable here and is
   reported as 0. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
1485

    
1486
#elif defined(__mips__)
1487

    
1488
/* Host-side (__mips__) SIGSEGV entry: PC is taken from the mcontext;
   the write flag is not recoverable here and is reported as 0. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
1501

    
1502
#elif defined(__hppa__)
1503

    
1504
/* Host-side (__hppa__) SIGSEGV entry: PC is the front element of the
   instruction-address-offset queue; the write flag is not recoverable
   here and is reported as 0. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr, 
                             is_write,
                             &uc->uc_sigmask, puc);
}
1519

    
1520
#else
1521

    
1522
#error host CPU specific signal handler needed
1523

    
1524
#endif
1525

    
1526
#endif /* !defined(CONFIG_SOFTMMU) */