/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

int tb_invalidated_flag;
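/* next_tb encodes the previously executed TB: the low two bits hold the
   index of the jump slot that may be patched for direct block chaining,
   the remaining bits the TranslationBlock pointer itself.  Zero forces a
   fresh lookup and suppresses chaining. */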
static unsigned long next_tb;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

#define SAVE_GLOBALS()
#define RESTORE_GLOBALS()

#if defined(__sparc__) && !defined(HOST_SOLARIS)
#include <features.h>
#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
                           ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
// Work around ugly bugs in glibc that mangle global register contents

static volatile void *saved_env;
#undef SAVE_GLOBALS
#define SAVE_GLOBALS() do {                                     \
        saved_env = env;                                        \
    } while(0)

#undef RESTORE_GLOBALS
#define RESTORE_GLOBALS() do {                                  \
        env = (void *)saved_env;                                \
    } while(0)
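
/* Wrap setjmp()/longjmp() so that the env global register is spilled to
   memory around the library call: the glibc versions matched above
   clobber global register variables. */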
static int sparc_setjmp(jmp_buf buf)
{
    int ret;

    SAVE_GLOBALS();
    ret = setjmp(buf);
    RESTORE_GLOBALS();
    return ret;
}
#undef setjmp
#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)

static void sparc_longjmp(jmp_buf buf, int val)
{
    SAVE_GLOBALS();
    longjmp(buf, val);
}
#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
#endif
#endif

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
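    /* (the lookup is keyed on physical addresses because the same
       virtual PC can map to different code once the guest remaps its
       pages, so a virtual key alone could return a stale TB) */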
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    SAVE_GLOBALS();
    cpu_gen_code(env, tb, &code_gen_size);
    RESTORE_GLOBALS();
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & U_FLAG;
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
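    /* fast path: direct-mapped cache indexed by the virtual PC; on a
       miss or a cs_base/flags mismatch we fall back to the physical
       hash table in tb_find_slow() */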
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            next_tb = 0;
        }
    }
    return tb;
}

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
    SAVE_GLOBALS();

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
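    /* eflags bit 10 is DF; it is stored as +1/-1 so that string
       instructions can add it directly to the index registers */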
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                SAVE_GLOBALS();
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    && likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                        !(env->hflags & HF_NMI_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                        env->hflags |= HF_NMI_MASK;
                        do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                        !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                        (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        next_tb = 0;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                RESTORE_GLOBALS();
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                        spin_unlock(&tb_lock);
                    }
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                next_tb = tcg_qemu_tb_exec(tc_ptr);
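                /* the return value carries the chaining information for
                   the block that just ran: its TB address with the taken
                   jump slot index in the low two bits (see next_tb) */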
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    next_tb = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    RESTORE_GLOBALS();
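    /* hostregs_helper.h is included a third time here: with neither
       DECLARE_HOST_REGS nor SAVE_HOST_REGS defined it emits the code
       that restores the saved host registers */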
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields MMU exceptions
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
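        /* in real mode or vm86 mode the segment base is simply
           16 * selector, with a 64 KB limit and no attributes */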
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception, 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

    
1041
#elif defined (TARGET_SH4)
1042
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1043
                                    int is_write, sigset_t *old_set,
1044
                                    void *puc)
1045
{
1046
    TranslationBlock *tb;
1047
    int ret;
1048

    
1049
    if (cpu_single_env)
1050
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
1051
#if defined(DEBUG_SIGNAL)
1052
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1053
           pc, address, is_write, *(unsigned long *)old_set);
1054
#endif
1055
    /* XXX: locking issue */
1056
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
1057
        return 1;
1058
    }
1059

    
1060
    /* see if it is an MMU fault */
1061
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1062
    if (ret < 0)
1063
        return 0; /* not an MMU fault */
1064
    if (ret == 0)
1065
        return 1; /* the MMU fault was handled without causing real CPU fault */
1066

    
1067
    /* now we have a real cpu fault */
1068
    tb = tb_find_pc(pc);
1069
    if (tb) {
1070
        /* the PC is inside the translated code. It means that we have
1071
           a virtual CPU fault */
1072
        cpu_restore_state(tb, env, pc, puc);
1073
    }
1074
#if 0
1075
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1076
               env->nip, env->error_code, tb);
1077
#endif
1078
    /* we restore the process signal mask as the sigreturn should
1079
       do it (XXX: use sigsetjmp) */
1080
    sigprocmask(SIG_SETMASK, old_set, NULL);
1081
    cpu_loop_exit();
1082
    /* never comes here */
1083
    return 1;
1084
}
1085

    
1086
#elif defined (TARGET_ALPHA)
1087
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1088
                                    int is_write, sigset_t *old_set,
1089
                                    void *puc)
1090
{
1091
    TranslationBlock *tb;
1092
    int ret;
1093

    
1094
    if (cpu_single_env)
1095
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
1096
#if defined(DEBUG_SIGNAL)
1097
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1098
           pc, address, is_write, *(unsigned long *)old_set);
1099
#endif
1100
    /* XXX: locking issue */
1101
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
1102
        return 1;
1103
    }
1104

    
1105
    /* see if it is an MMU fault */
1106
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1107
    if (ret < 0)
1108
        return 0; /* not an MMU fault */
1109
    if (ret == 0)
1110
        return 1; /* the MMU fault was handled without causing real CPU fault */
1111

    
1112
    /* now we have a real cpu fault */
1113
    tb = tb_find_pc(pc);
1114
    if (tb) {
1115
        /* the PC is inside the translated code. It means that we have
1116
           a virtual CPU fault */
1117
        cpu_restore_state(tb, env, pc, puc);
1118
    }
1119
#if 0
1120
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1121
               env->nip, env->error_code, tb);
1122
#endif
1123
    /* we restore the process signal mask as the sigreturn should
1124
       do it (XXX: use sigsetjmp) */
1125
    sigprocmask(SIG_SETMASK, old_set, NULL);
1126
    cpu_loop_exit();
1127
    /* never comes here */
1128
    return 1;
1129
}
1130
#elif defined (TARGET_CRIS)
1131
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1132
                                    int is_write, sigset_t *old_set,
1133
                                    void *puc)
1134
{
1135
    TranslationBlock *tb;
1136
    int ret;
1137

    
1138
    if (cpu_single_env)
1139
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
1140
#if defined(DEBUG_SIGNAL)
1141
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1142
           pc, address, is_write, *(unsigned long *)old_set);
1143
#endif
1144
    /* XXX: locking issue */
1145
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
1146
        return 1;
1147
    }
1148

    
1149
    /* see if it is an MMU fault */
1150
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1151
    if (ret < 0)
1152
        return 0; /* not an MMU fault */
1153
    if (ret == 0)
1154
        return 1; /* the MMU fault was handled without causing real CPU fault */
1155

    
1156
    /* now we have a real cpu fault */
1157
    tb = tb_find_pc(pc);
1158
    if (tb) {
1159
        /* the PC is inside the translated code. It means that we have
1160
           a virtual CPU fault */
1161
        cpu_restore_state(tb, env, pc, puc);
1162
    }
1163
    /* we restore the process signal mask as the sigreturn should
1164
       do it (XXX: use sigsetjmp) */
1165
    sigprocmask(SIG_SETMASK, old_set, NULL);
1166
    cpu_loop_exit();
1167
    /* never comes here */
1168
    return 1;
1169
}
1170

    
1171
#else
1172
#error unsupported target CPU
1173
#endif

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
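    /* trap 0xe is the page-fault exception; bit 1 of the page-fault
       error code is set when the faulting access was a write */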
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)             ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)              REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                       REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)                       REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)                       REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                       REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                        REG_sig(link, context)  /* Link register */
# define CR_sig(context)                        REG_sig(ccr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)            (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)                     (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                       REG_sig(dar, context)
# define DSISR_sig(context)                     REG_sig(dsisr, context)
# define TRAP_sig(context)                      REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)             ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)        ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)        ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)          ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)              REG_sig(r##reg_num, context)
# define IAR_sig(context)                       REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                       REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                       REG_sig(ctr, context)
# define XER_sig(context)                       REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                        REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                        REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)            FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                     ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                       EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)                     EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                      EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
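    /* this assumes the Linux sparc signal frame layout, where the
       register dump follows siginfo_t directly; regs[1] below is the PC */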
1338
    unsigned long pc;
1339
    int is_write;
1340
    uint32_t insn;
1341

    
1342
    /* XXX: is there a standard glibc define ? */
1343
    pc = regs[1];
1344
    /* XXX: need kernel patch to get write flag faster */
1345
    is_write = 0;
1346
    insn = *(uint32_t *)pc;
1347
    if ((insn >> 30) == 3) {
1348
      switch((insn >> 19) & 0x3f) {
1349
      case 0x05: // stb
1350
      case 0x06: // sth
1351
      case 0x04: // st
1352
      case 0x07: // std
1353
      case 0x24: // stf
1354
      case 0x27: // stdf
1355
      case 0x25: // stfsr
1356
        is_write = 1;
1357
        break;
1358
      }
1359
    }
1360
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1361
                             is_write, sigmask, NULL);
1362
}
1363

    
1364
#elif defined(__arm__)
1365

    
1366
int cpu_signal_handler(int host_signum, void *pinfo,
1367
                       void *puc)
1368
{
1369
    siginfo_t *info = pinfo;
1370
    struct ucontext *uc = puc;
1371
    unsigned long pc;
1372
    int is_write;
1373

    
1374
    pc = uc->uc_mcontext.arm_pc;
1375
    /* XXX: compute is_write */
1376
    is_write = 0;
1377
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1378
                             is_write,
1379
                             &uc->uc_sigmask, puc);
1380
}
1381

    
1382
#elif defined(__mc68000)
1383

    
1384
int cpu_signal_handler(int host_signum, void *pinfo,
1385
                       void *puc)
1386
{
1387
    siginfo_t *info = pinfo;
1388
    struct ucontext *uc = puc;
1389
    unsigned long pc;
1390
    int is_write;
1391

    
1392
    pc = uc->uc_mcontext.gregs[16];
1393
    /* XXX: compute is_write */
1394
    is_write = 0;
1395
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1396
                             is_write,
1397
                             &uc->uc_sigmask, puc);
1398
}
1399

    
1400
#elif defined(__ia64)
1401

    
1402
#ifndef __ISR_VALID
1403
  /* This ought to be in <bits/siginfo.h>... */
1404
# define __ISR_VALID        1
1405
#endif
1406

    
1407
int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
1408
{
1409
    siginfo_t *info = pinfo;
1410
    struct ucontext *uc = puc;
1411
    unsigned long ip;
1412
    int is_write = 0;
1413

    
1414
    ip = uc->uc_mcontext.sc_ip;
1415
    switch (host_signum) {
1416
      case SIGILL:
1417
      case SIGFPE:
1418
      case SIGSEGV:
1419
      case SIGBUS:
1420
      case SIGTRAP:
1421
          if (info->si_code && (info->si_segvflags & __ISR_VALID))
1422
              /* ISR.W (write-access) is bit 33:  */
1423
              is_write = (info->si_isr >> 33) & 1;
1424
          break;
1425

    
1426
      default:
1427
          break;
1428
    }
1429
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
1430
                             is_write,
1431
                             &uc->uc_sigmask, puc);
1432
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */