Statistics
| Branch: | Revision:

root / cpu-exec.c @ dfe5fff3

History | View | Annotate | Download (55 kB)

1
/*
2
 *  i386 emulator main execution loop
3
 *
4
 *  Copyright (c) 2003-2005 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include "config.h"
20
#include "exec.h"
21
#include "disas.h"
22
#include "tcg.h"
23
#include "kvm.h"
24

    
25
#if !defined(CONFIG_SOFTMMU)
26
#undef EAX
27
#undef ECX
28
#undef EDX
29
#undef EBX
30
#undef ESP
31
#undef EBP
32
#undef ESI
33
#undef EDI
34
#undef EIP
35
#include <signal.h>
36
#ifdef __linux__
37
#include <sys/ucontext.h>
38
#endif
39
#endif
40

    
41
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
42
// Work around ugly bugs in glibc that mangle global register contents
43
#undef env
44
#define env cpu_single_env
45
#endif
46

    
47
/* Nonzero when translation blocks may have been invalidated while code
   was being generated.  Cleared in tb_find_slow() and consumed in
   cpu_exec(), which then resets next_tb to force a fresh TB lookup.
   NOTE(review): the code that sets it is not in this file's visible
   portion — presumably the translator/invalidation path; confirm. */
int tb_invalidated_flag;
48

    
49
//#define DEBUG_EXEC
50
//#define DEBUG_SIGNAL
51

    
52
/* Public wrapper around the per-target cpu_has_work() predicate. */
int qemu_cpu_has_work(CPUState *env)
{
    int pending = cpu_has_work(env);

    return pending;
}
56

    
57
/* Abort execution of the current translated block and return to the
   setjmp() point in cpu_exec() (its setjmp then returns nonzero). */
void cpu_loop_exit(void)
{
    /* NOTE: the register at this point must be saved by hand because
       longjmp restore them */
    regs_to_env();  /* flush host-register-cached CPU state back to env */
    longjmp(env->jmp_env, 1);
}
64

    
65
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
/* 'puc' is the host ucontext/sigcontext passed through from the signal
   handler; its layout is platform specific. */
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* Re-enable the signals that were blocked on handler entry,
           since we are not returning through the normal signal path. */
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    /* No exception pending; jump back to the main loop in cpu_exec(). */
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
95

    
96
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    /* Generate a one-shot translation of the same code, with its
       instruction budget capped at max_cycles (fits in CF_COUNT). */
    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    /* Throw the temporary translation away immediately. */
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
122

    
123
static TranslationBlock *tb_find_slow(target_ulong pc,
124
                                      target_ulong cs_base,
125
                                      uint64_t flags)
126
{
127
    TranslationBlock *tb, **ptb1;
128
    unsigned int h;
129
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
130

    
131
    tb_invalidated_flag = 0;
132

    
133
    regs_to_env(); /* XXX: do it just before cpu_gen_code() */
134

    
135
    /* find translated block using physical mappings */
136
    phys_pc = get_phys_addr_code(env, pc);
137
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
138
    phys_page2 = -1;
139
    h = tb_phys_hash_func(phys_pc);
140
    ptb1 = &tb_phys_hash[h];
141
    for(;;) {
142
        tb = *ptb1;
143
        if (!tb)
144
            goto not_found;
145
        if (tb->pc == pc &&
146
            tb->page_addr[0] == phys_page1 &&
147
            tb->cs_base == cs_base &&
148
            tb->flags == flags) {
149
            /* check next page if needed */
150
            if (tb->page_addr[1] != -1) {
151
                virt_page2 = (pc & TARGET_PAGE_MASK) +
152
                    TARGET_PAGE_SIZE;
153
                phys_page2 = get_phys_addr_code(env, virt_page2);
154
                if (tb->page_addr[1] == phys_page2)
155
                    goto found;
156
            } else {
157
                goto found;
158
            }
159
        }
160
        ptb1 = &tb->phys_hash_next;
161
    }
162
 not_found:
163
   /* if no translated code available, then translate it now */
164
    tb = tb_gen_code(env, pc, cs_base, flags, 0);
165

    
166
 found:
167
    /* we add the TB in the virtual pc hash table */
168
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
169
    return tb;
170
}
171

    
172
static inline TranslationBlock *tb_find_fast(void)
173
{
174
    TranslationBlock *tb;
175
    target_ulong cs_base, pc;
176
    int flags;
177

    
178
    /* we record a subset of the CPU state. It will
179
       always be the same before a given translated block
180
       is executed. */
181
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
182
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
183
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
184
                 tb->flags != flags)) {
185
        tb = tb_find_slow(pc, cs_base, flags);
186
    }
187
    return tb;
188
}
189

    
190
/* Optional hook invoked by cpu_handle_debug_exception(); installed via
   cpu_set_debug_excp_handler(). */
static CPUDebugExcpHandler *debug_excp_handler;
191

    
192
/* Install a new debug-exception hook and return the previous one so
   callers can chain to it or restore it later. */
CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *prev = debug_excp_handler;

    debug_excp_handler = handler;
    return prev;
}
199

    
200
/* Post-process an EXCP_DEBUG exit before control returns to the caller
   of cpu_exec(). */
static void cpu_handle_debug_exception(CPUState *env)
{
    /* Unless a specific watchpoint triggered this stop, clear the hit
       mark on every watchpoint so stale state is not reported. */
    if (!env->watchpoint_hit) {
        CPUWatchpoint *wp;

        TAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    /* Hand the event to the registered hook, if one is installed. */
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}
211

    
212
/* main execution loop */
213

    
214
/* Main CPU execution loop: deliver pending exceptions and hardware
   interrupts, then repeatedly find (or translate) the TB matching the
   current CPU state and run it, until something forces an exit.
   Returns the exception index that caused the exit (e.g.
   EXCP_INTERRUPT, EXCP_HLT, EXCP_DEBUG). */
int cpu_exec(CPUState *env1)
{
/* hostregs_helper.h expands differently depending on which of
   DECLARE/SAVE_HOST_REGS is defined: here it declares storage for the
   host registers that the global register variables occupy. */
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    /* next_tb packs the previously executed TB pointer in its upper
       bits and a slot/status code in bits 0-1: used for TB chaining
       below, and (next_tb & 3) == 2 marks an icount expiry. */
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        /* setjmp returns nonzero when we come back via longjmp() from
           cpu_loop_exit() / cpu_resume_from_signal(). */
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
/* Reload the global-register env after longjmp: glibc on sparc may
   have clobbered the global register (see workaround at file top). */
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef CONFIG_KQEMU
            /* Hand execution to the kqemu kernel accelerator when it
               can run this state; flags must be in standard format
               around the call. */
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            /* With KVM the guest runs in the kernel; one call, then
               restart the outer loop to process the result. */
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    /* x86 interrupt priority: INIT, SIPI, then (if the
                       SVM global interrupt flag allows) SMI, NMI, MCE,
                       external INTR, virtual INTR. */
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                            svm_check_intercept(SVM_EXIT_INIT);
                            do_cpu_init(env);
                            env->exception_index = EXCP_HALTED;
                            cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                            do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_interrupts_enabled(env)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                        next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occured at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                   /* Don't use the cached interrupt_request value,
                      do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef CONFIG_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                    /* chain: patch jump slot (next_tb & 3) of the
                       previous TB to branch straight into 'tb' */
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            /* the 16-bit decrementer holds at most 0xffff */
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(CONFIG_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            /* arrived here via longjmp(): reload the CPU state that
               was cached in host registers */
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
727

    
728
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* Currently a no-op: the body is compiled out (see XXX below). */
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
740

    
741
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
742

    
743
/* Load segment register 'seg_reg' with 'selector' on CPU state 's'.
   In protected mode (and not vm86) this goes through the full
   helper_load_seg() path; in real/vm86 mode the segment cache is
   loaded directly with base = selector << 4 and a 64K limit. */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *prev_env = env;

    env = s;
    if ((env->cr[0] & CR0_PE_MASK) && !(env->eflags & VM_MASK)) {
        /* protected mode: full descriptor load with checks */
        helper_load_seg(seg_reg, selector);
    } else {
        /* real mode / vm86 mode */
        int sel = selector & 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, sel,
                               (sel << 4), 0xffff, 0);
    }
    env = prev_env;
}
758

    
759
/* Execute an FSAVE to guest address 'ptr' on CPU state 's'.
   'data32' selects the 32-bit operand-size layout.  The global 'env'
   is temporarily switched to 's' around the helper call. */
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *prev_env = env;

    env = s;
    helper_fsave(ptr, data32);
    env = prev_env;
}
770

    
771
/* Execute an FRSTOR from guest address 'ptr' on CPU state 's'.
   'data32' selects the 32-bit operand-size layout.  The global 'env'
   is temporarily switched to 's' around the helper call. */
void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *prev_env = env;

    env = s;
    helper_frstor(ptr, data32);
    env = prev_env;
}
782

    
783
#endif /* TARGET_I386 */
784

    
785
#if !defined(CONFIG_SOFTMMU)
786

    
787
#if defined(TARGET_I386)
788

    
789
/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0'. 'old_set' is the
   signal set which should be restored.
   Returns 0 if the fault was not an MMU fault (caller handles it),
   1 otherwise; on a real guest fault this longjmps back into the CPU
   loop and never actually returns. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    /* a write to a page we write-protected ourselves: unprotect it and
       retry the access */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        /* raise the guest #PF; does not return */
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
841

    
842
#elif defined(TARGET_ARM)
843
/* TARGET_ARM variant: see the TARGET_I386 comment above for the
   parameter meanings.  Returns 0 if not an MMU fault; on a guest
   fault it longjmps back into the CPU loop via cpu_loop_exit(). */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
880
#elif defined(TARGET_SPARC)
881
/* TARGET_SPARC variant: see the TARGET_I386 comment above for the
   parameter meanings.  Returns 0 if not an MMU fault; on a guest
   fault it longjmps back into the CPU loop via cpu_loop_exit(). */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
918
#elif defined (TARGET_PPC)
919
/* TARGET_PPC variant: see the TARGET_I386 comment above for the
   parameter meanings.  ret == 1 means a guest fault to deliver
   (cpu_loop_exit); ret == 2 falls through to cpu_resume_from_signal. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
967

    
968
#elif defined(TARGET_M68K)
969
/* TARGET_M68K variant: see the TARGET_I386 comment above for the
   parameter meanings.  Returns 0 if not an MMU fault; on a guest
   fault it longjmps back into the CPU loop via cpu_loop_exit(). */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    /* Fix: translate the host address with h2g() before calling
       page_unprotect(), as every other target's handler does; the
       previous code passed the raw host address, which is wrong
       whenever guest and host address spaces are not identity-mapped. */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
1006

    
1007
#elif defined (TARGET_MIPS)
1008
/* TARGET_MIPS variant: see the TARGET_I386 comment above for the
   parameter meanings.  ret == 1 means a guest fault to deliver
   (cpu_loop_exit); ret == 2 falls through to cpu_resume_from_signal. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
1056

    
1057
#elif defined (TARGET_MICROBLAZE)
1058
/* TARGET_MICROBLAZE variant: see the TARGET_I386 comment above for the
   parameter meanings.  ret == 1 means a guest fault to deliver
   (cpu_loop_exit); ret == 2 falls through to cpu_resume_from_signal.
   NOTE(review): the disabled printf below references env->PC like the
   MIPS handler it was copied from — harmless, as it is compiled out. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mb_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
1106

    
1107
#elif defined (TARGET_SH4)
1108
/* TARGET_SH4 variant: see the TARGET_I386 comment above for the
   parameter meanings.  Returns 0 if not an MMU fault; on a guest
   fault it longjmps back into the CPU loop via cpu_loop_exit().
   NOTE(review): the disabled printf below references env->nip — a
   PPC leftover; SH4 has no 'nip'.  Harmless, as it is compiled out. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
1151

    
1152
#elif defined (TARGET_ALPHA)
1153
/* TARGET_ALPHA variant: see the TARGET_I386 comment above for the
   parameter meanings.  Returns 0 if not an MMU fault; on a guest
   fault it longjmps back into the CPU loop via cpu_loop_exit().
   NOTE(review): the disabled printf below references env->nip — a
   PPC leftover; Alpha has no 'nip'.  Harmless, as it is compiled out. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
1196
#elif defined (TARGET_CRIS)
1197
/* TARGET_CRIS variant: see the TARGET_I386 comment above for the
   parameter meanings.  Returns 0 if not an MMU fault; on a guest
   fault it longjmps back into the CPU loop via cpu_loop_exit(). */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
1236

    
1237
#else
1238
#error unsupported target CPU
1239
#endif
1240

    
1241
#if defined(__i386__)
1242

    
1243
#if defined(__APPLE__)
1244
# include <sys/ucontext.h>
1245

    
1246
# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1247
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
1248
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
1249
# define MASK_sig(context)    ((context)->uc_sigmask)
1250
#elif defined(__OpenBSD__)
1251
# define EIP_sig(context)     ((context)->sc_eip)
1252
# define TRAP_sig(context)    ((context)->sc_trapno)
1253
# define ERROR_sig(context)   ((context)->sc_err)
1254
# define MASK_sig(context)    ((context)->sc_mask)
1255
#else
1256
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
1257
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
1258
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
1259
# define MASK_sig(context)    ((context)->uc_sigmask)
1260
#endif
1261

    
1262
/* i386 host signal handler: extract the faulting PC from the signal
   context and, for page faults (trap 0xe), the write bit (bit 1 of the
   page-fault error code), then forward to handle_cpu_signal(). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
1287

    
1288
#elif defined(__x86_64__)
1289

    
1290
#ifdef __NetBSD__
1291
#define PC_sig(context)       _UC_MACHINE_PC(context)
1292
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
1293
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
1294
#define MASK_sig(context)     ((context)->uc_sigmask)
1295
#elif defined(__OpenBSD__)
1296
#define PC_sig(context)       ((context)->sc_rip)
1297
#define TRAP_sig(context)     ((context)->sc_trapno)
1298
#define ERROR_sig(context)    ((context)->sc_err)
1299
#define MASK_sig(context)     ((context)->sc_mask)
1300
#else
1301
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
1302
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
1303
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
1304
#define MASK_sig(context)     ((context)->uc_sigmask)
1305
#endif
1306

    
1307
/* x86_64 host signal handler: extract the faulting PC from the signal
   context and, for page faults (trap 0xe), the write bit (bit 1 of the
   page-fault error code), then forward to handle_cpu_signal(). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
1326

    
1327
#elif defined(_ARCH_PPC)
1328

    
1329
/***********************************************************************
1330
 * signal context platform-specific definitions
1331
 * From Wine
1332
 */
1333
#ifdef linux
1334
/* All Registers access - only for local access */
1335
# define REG_sig(reg_name, context)                ((context)->uc_mcontext.regs->reg_name)
1336
/* Gpr Registers access  */
1337
# define GPR_sig(reg_num, context)                REG_sig(gpr[reg_num], context)
1338
# define IAR_sig(context)                        REG_sig(nip, context)        /* Program counter */
1339
# define MSR_sig(context)                        REG_sig(msr, context)   /* Machine State Register (Supervisor) */
1340
# define CTR_sig(context)                        REG_sig(ctr, context)   /* Count register */
1341
# define XER_sig(context)                        REG_sig(xer, context) /* User's integer exception register */
1342
# define LR_sig(context)                        REG_sig(link, context) /* Link register */
1343
# define CR_sig(context)                        REG_sig(ccr, context) /* Condition register */
1344
/* Float Registers access  */
1345
# define FLOAT_sig(reg_num, context)                (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1346
# define FPSCR_sig(context)                        (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1347
/* Exception Registers access */
1348
# define DAR_sig(context)                        REG_sig(dar, context)
1349
# define DSISR_sig(context)                        REG_sig(dsisr, context)
1350
# define TRAP_sig(context)                        REG_sig(trap, context)
1351
#endif /* linux */
1352

    
1353
#ifdef __APPLE__
1354
# include <sys/ucontext.h>
1355
typedef struct ucontext SIGCONTEXT;
1356
/* All Registers access - only for local access */
1357
# define REG_sig(reg_name, context)                ((context)->uc_mcontext->ss.reg_name)
1358
# define FLOATREG_sig(reg_name, context)        ((context)->uc_mcontext->fs.reg_name)
1359
# define EXCEPREG_sig(reg_name, context)        ((context)->uc_mcontext->es.reg_name)
1360
# define VECREG_sig(reg_name, context)                ((context)->uc_mcontext->vs.reg_name)
1361
/* Gpr Registers access */
1362
# define GPR_sig(reg_num, context)                REG_sig(r##reg_num, context)
1363
# define IAR_sig(context)                        REG_sig(srr0, context)        /* Program counter */
1364
# define MSR_sig(context)                        REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
1365
# define CTR_sig(context)                        REG_sig(ctr, context)
1366
# define XER_sig(context)                        REG_sig(xer, context) /* Link register */
1367
# define LR_sig(context)                        REG_sig(lr, context)  /* User's integer exception register */
1368
# define CR_sig(context)                        REG_sig(cr, context)  /* Condition register */
1369
/* Float Registers access */
1370
# define FLOAT_sig(reg_num, context)                FLOATREG_sig(fpregs[reg_num], context)
1371
# define FPSCR_sig(context)                        ((double)FLOATREG_sig(fpscr, context))
1372
/* Exception Registers access */
1373
# define DAR_sig(context)                        EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
1374
# define DSISR_sig(context)                        EXCEPREG_sig(dsisr, context)
1375
# define TRAP_sig(context)                        EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1376
#endif /* __APPLE__ */
1377

    
1378
/* PowerPC host signal handler: take the faulting PC from IAR and
   derive is_write from DSISR bit 0x02000000 (skipped for trap 0x400,
   which is an instruction-access fault with no valid DSISR). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
1399

    
1400
#elif defined(__alpha__)
1401

    
1402
int cpu_signal_handler(int host_signum, void *pinfo,
1403
                           void *puc)
1404
{
1405
    siginfo_t *info = pinfo;
1406
    struct ucontext *uc = puc;
1407
    uint32_t *pc = uc->uc_mcontext.sc_pc;
1408
    uint32_t insn = *pc;
1409
    int is_write = 0;
1410

    
1411
    /* XXX: need kernel patch to get write flag faster */
1412
    switch (insn >> 26) {
1413
    case 0x0d: // stw
1414
    case 0x0e: // stb
1415
    case 0x0f: // stq_u
1416
    case 0x24: // stf
1417
    case 0x25: // stg
1418
    case 0x26: // sts
1419
    case 0x27: // stt
1420
    case 0x2c: // stl
1421
    case 0x2d: // stq
1422
    case 0x2e: // stl_c
1423
    case 0x2f: // stq_c
1424
        is_write = 1;
1425
    }
1426

    
1427
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1428
                             is_write, &uc->uc_sigmask, puc);
1429
}
1430
#elif defined(__sparc__)
1431

    
1432
/* SPARC host signal handler: the kernel gives no write flag, so decode
   the faulting instruction — format 3 (top two bits == 3) with a store
   opcode in bits 24:19 means a write fault.  The PC and signal mask
   are dug out of layout-specific locations depending on ABI/OS. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
      switch((insn >> 19) & 0x3f) {
      case 0x05: // stb
      case 0x15: // stba
      case 0x06: // sth
      case 0x16: // stha
      case 0x04: // st
      case 0x14: // sta
      case 0x07: // std
      case 0x17: // stda
      case 0x0e: // stx
      case 0x1e: // stxa
      case 0x24: // stf
      case 0x34: // stfa
      case 0x27: // stdf
      case 0x37: // stdfa
      case 0x26: // stqf
      case 0x36: // stqfa
      case 0x25: // stfsr
      case 0x3c: // casa
      case 0x3e: // casxa
        is_write = 1;
        break;
      }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
1486

    
1487
#elif defined(__arm__)
1488

    
1489
/* ARM host signal handler: PC location in the ucontext differs between
   old (<= 2.3) and newer glibc.  is_write is not recoverable here, so
   write faults are treated as reads. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
1508

    
1509
#elif defined(__mc68000)
1510

    
1511
/* m68k host signal handler: PC is gregs[16].  is_write is not
   recoverable here, so write faults are treated as reads. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
1526

    
1527
#elif defined(__ia64)
1528

    
1529
#ifndef __ISR_VALID
1530
  /* This ought to be in <bits/siginfo.h>... */
1531
# define __ISR_VALID        1
1532
#endif
1533

    
1534
/* ia64 host signal handler: when the interrupt status register (ISR)
   in the siginfo is valid, its bit 33 (ISR.W) gives the write flag. */
int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
      case SIGILL:
      case SIGFPE:
      case SIGSEGV:
      case SIGBUS:
      case SIGTRAP:
          if (info->si_code && (info->si_segvflags & __ISR_VALID))
              /* ISR.W (write-access) is bit 33:  */
              is_write = (info->si_isr >> 33) & 1;
          break;

      default:
          break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
1560

    
1561
#elif defined(__s390__)
1562

    
1563
/* s390 host signal handler: PC comes from the PSW address.  is_write
   is not recoverable here, so write faults are treated as reads. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
1577

    
1578
#elif defined(__mips__)
1579

    
1580
/* MIPS host signal handler: PC comes from uc_mcontext.pc.  is_write
   is not recoverable here, so write faults are treated as reads. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
1593

    
1594
#elif defined(__hppa__)
1595

    
1596
/* hppa host signal handler: PC comes from the front of the instruction
   address offset queue (sc_iaoq[0]).  is_write is not recoverable
   here, so write faults are treated as reads. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr, 
                             is_write,
                             &uc->uc_sigmask, puc);
}
1611

    
1612
#else
1613

    
1614
#error host CPU specific signal handler needed
1615

    
1616
#endif
1617

    
1618
#endif /* !defined(CONFIG_SOFTMMU) */