/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* Exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator.
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

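    /* The value returned by tcg_qemu_tb_exec() is the pointer to the
       last TB that ran, with status flags packed into its two low bits
       (compare the (next_tb & 3) tests here and in cpu_exec()); a
       status of 2 means the block did not run to completion. */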
    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        CPU_PC_FROM_TB(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

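/* Look a TB up in the physically indexed hash table, translating the
   guest code on a miss.  This is the slow path behind tb_find_fast():
   it hashes the physical PC so that cached blocks survive remapping of
   the virtual address space, and re-checks the second physical page
   for blocks that span a page boundary. */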
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code is available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB to the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

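/* Fast TB lookup: a direct-mapped cache indexed by a hash of the
   current virtual PC.  The flags word gathers every piece of CPU state
   that influenced code generation, so a cached block is only reused
   when all of it matches. */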
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = ((env->pstate & PS_AM) << 2)
        | (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->active_tc.PC;
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL
                    | DELAY_SLOT_TRUE | DELAY_SLOT_CLEARME))   /* Bits  0- 3 */
            | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR))  /* Bits 19-21 */
            | (env->sr & (SR_MD | SR_RB));                     /* Bits 29-30 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (S_FLAG | P_FLAG | U_FLAG | X_FLAG);
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

/* main execution loop */

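/* Run the guest CPU until something forces a return: a pending
   exception, an exit request, or a hand-off to another execution
   engine (KQEMU/KVM).  Returns the exception index that stopped the
   loop. */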
int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
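    /* Condition codes are computed lazily: CC_SRC and CC_OP describe
       the last flag-setting operation, and DF caches the direction
       flag (EFLAGS bit 10) as +1/-1 so string instructions can add it
       directly.  The corresponding bits are cleared from env->eflags
       while translated code runs. */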
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
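    /* Anything below that needs to abort execution (a guest exception,
       a halt, an exit request) calls cpu_loop_exit(), which longjmps
       back to this setjmp; the exception is then dispatched at the top
       of the loop before translated code is entered again. */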
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                int ret;
                ret = kvm_cpu_exec(env);
                if ((env->interrupt_request & CPU_INTERRUPT_EXIT)) {
                    env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                } else if (env->halted) {
                    cpu_loop_exit();
                } else
                    longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
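            /* next_tb carries the TB we are chaining from: the pointer
               to that TB with the index of the jump slot to patch in
               its two low bits.  Zero means "do not chain". */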
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request) &&
                    likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value;
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
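                    /* Low bits == 2: the block stopped because the
                       icount budget ran out.  icount_decr.u16.low holds
                       at most 0xffff instructions; any surplus lives in
                       icount_extra and is used to refill the
                       decrementer below. */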
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        CPU_PC_FROM_TB(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail-safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it raises an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
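/* Returns 1 if the fault was handled (the page was unprotected, or a
   guest exception was raised via the saved setjmp context), 0 if it
   was not an MMU fault and must be dealt with by the caller. */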
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

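/* Host-side SIGSEGV handler glue: extract the faulting PC and, for
   trap 0xe (the x86 page-fault exception), bit 1 of the error code,
   which is set when the faulting access was a write. */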
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)                ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)                REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                        REG_sig(nip, context)        /* Program counter */
# define MSR_sig(context)                        REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)                        REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                        REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context)                        REG_sig(link, context) /* Link register */
# define CR_sig(context)                        REG_sig(ccr, context) /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)                (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)                        (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                        REG_sig(dar, context)
# define DSISR_sig(context)                        REG_sig(dsisr, context)
# define TRAP_sig(context)                        REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)                ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)        ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)        ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)                ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)                REG_sig(r##reg_num, context)
# define IAR_sig(context)                        REG_sig(srr0, context)        /* Program counter */
# define MSR_sig(context)                        REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                        REG_sig(ctr, context)
# define XER_sig(context)                        REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context)                        REG_sig(lr, context)  /* Link register */
# define CR_sig(context)                        REG_sig(cr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)                FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                        ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                        EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)                        EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                        EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                           void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

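    /* On Alpha the major opcode is in the top six bits of the
       instruction word; the cases below are the integer and
       floating-point store opcodes, which indicate a write fault. */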
    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

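    /* Decode the faulting SPARC instruction: format-3 memory ops have
       the top two bits equal to 3, and the op3 field in bits 24-19
       selects the operation; the cases below are stores. */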
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch ((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID        1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
      case SIGILL:
      case SIGFPE:
      case SIGSEGV:
      case SIGBUS:
      case SIGTRAP:
          if (info->si_code && (info->si_segvflags & __ISR_VALID))
              /* ISR.W (write-access) is bit 33:  */
              is_write = (info->si_isr >> 33) & 1;
          break;

      default:
          break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */