/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
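
/* NOTE: cpu_exec() installs a setjmp() context in env->jmp_env before
   entering its execution loop; cpu_loop_exit() above and
   cpu_resume_from_signal() below unwind back to it with longjmp(), which
   is why guest state held in host registers must first be flushed to env
   with regs_to_env(). */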

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
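
/* NOTE: tcg_qemu_tb_exec() returns the address of the last executed TB
   with status encoded in its two low bits: for a chained TB they hold the
   index of the jump slot that was taken (consumed by tb_add_jump() in
   cpu_exec()), while the value 2 means execution stopped before the TB
   body ran, e.g. because the instruction counter expired. */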

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
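
/* NOTE: TB lookup is two-level: tb_find_fast() probes env->tb_jmp_cache,
   a direct-mapped cache indexed by a hash of the virtual PC; on a miss,
   tb_find_slow() walks the tb_phys_hash chain keyed by the physical PC
   (so aliased virtual mappings share one translation) and generates the
   code if no matching TB exists yet. */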

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    /* clear stale watchpoint hit flags if we did not stop on one */
    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */
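
/* cpu_exec() is built from two nested loops inside a setjmp() context:
   the outer loop re-enters after every longjmp() to env->jmp_env and
   delivers any pending exception; the inner loop services pending
   interrupt requests, looks up (or translates) the next TB, optionally
   chains it to the previous one, and runs the generated code until
   something forces an exit back to the outer loop. */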

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
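
    /* NOTE: on x86 the condition codes are computed lazily: CC_SRC holds
       the operands/result of the last flag-setting operation and CC_OP
       records which operation it was, so eflags is only materialized on
       demand (see the helper_cc_compute_all() calls below).  DF is kept
       as +1/-1 so the string instructions can step their index registers
       by adding it directly. */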
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
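
            /* next_tb holds the raw return value of the last
               tcg_qemu_tb_exec() call: the address of the TB that just
               ran, with the taken jump slot in its two low bits.  It is
               forced back to 0 whenever control flow changed behind the
               translator's back (interrupt delivered, TB invalidated, ...)
               so that the new TB does not get direct-jump patched into
               the old one. */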

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3),
                                next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely(env->exit_request))
                    env->current_tb = NULL;
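
                /* Execute the TB (and any TBs chained to it) unless a
                   pending exit request cleared env->current_tb above.
                   The icount decrementer visible to generated code is
                   only 16 bits wide (icount_decr.u16.low), so a large
                   instruction budget is handed out in slices of at most
                   0xffff instructions, with the remainder parked in
                   icount_extra. */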

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
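
/* These helpers are called from outside cpu_exec(), so each one
   temporarily installs its CPUX86State argument as the global 'env'
   that the helper_* functions expect, and restores the previous value
   before returning. */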

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

    
758
/* 'pc' is the host PC at which the exception was raised. 'address' is
759
   the effective address of the memory exception. 'is_write' is 1 if a
760
   write caused the exception and otherwise 0'. 'old_set' is the
761
   signal set which should be restored */
762
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
763
                                    int is_write, sigset_t *old_set,
764
                                    void *puc)
765
{
766
    TranslationBlock *tb;
767
    int ret;
768

    
769
    if (cpu_single_env)
770
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
771
#if defined(DEBUG_SIGNAL)
772
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
773
                pc, address, is_write, *(unsigned long *)old_set);
774
#endif
775
    /* XXX: locking issue */
776
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
777
        return 1;
778
    }
779

    
780
    /* see if it is an MMU fault */
781
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
782
    if (ret < 0)
783
        return 0; /* not an MMU fault */
784
    if (ret == 0)
785
        return 1; /* the MMU fault was handled without causing real CPU fault */
786
    /* now we have a real cpu fault */
787
    tb = tb_find_pc(pc);
788
    if (tb) {
789
        /* the PC is inside the translated code. It means that we have
790
           a virtual CPU fault */
791
        cpu_restore_state(tb, env, pc, puc);
792
    }
793
    if (ret == 1) {
794
#if 0
795
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
796
               env->eip, env->cr[2], env->error_code);
797
#endif
798
        /* we restore the process signal mask as the sigreturn should
799
           do it (XXX: use sigsetjmp) */
800
        sigprocmask(SIG_SETMASK, old_set, NULL);
801
        raise_exception_err(env->exception_index, env->error_code);
802
    } else {
803
        /* activate soft MMU for this block */
804
        env->hflags |= HF_SOFTMMU_MASK;
805
        cpu_resume_from_signal(env, puc);
806
    }
807
    /* never comes here */
808
    return 1;
809
}
810

    
811
#elif defined(TARGET_ARM)
812
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
813
                                    int is_write, sigset_t *old_set,
814
                                    void *puc)
815
{
816
    TranslationBlock *tb;
817
    int ret;
818

    
819
    if (cpu_single_env)
820
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
821
#if defined(DEBUG_SIGNAL)
822
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
823
           pc, address, is_write, *(unsigned long *)old_set);
824
#endif
825
    /* XXX: locking issue */
826
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
827
        return 1;
828
    }
829
    /* see if it is an MMU fault */
830
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
831
    if (ret < 0)
832
        return 0; /* not an MMU fault */
833
    if (ret == 0)
834
        return 1; /* the MMU fault was handled without causing real CPU fault */
835
    /* now we have a real cpu fault */
836
    tb = tb_find_pc(pc);
837
    if (tb) {
838
        /* the PC is inside the translated code. It means that we have
839
           a virtual CPU fault */
840
        cpu_restore_state(tb, env, pc, puc);
841
    }
842
    /* we restore the process signal mask as the sigreturn should
843
       do it (XXX: use sigsetjmp) */
844
    sigprocmask(SIG_SETMASK, old_set, NULL);
845
    cpu_loop_exit();
846
    /* never comes here */
847
    return 1;
848
}
849
#elif defined(TARGET_SPARC)
850
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
851
                                    int is_write, sigset_t *old_set,
852
                                    void *puc)
853
{
854
    TranslationBlock *tb;
855
    int ret;
856

    
857
    if (cpu_single_env)
858
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
859
#if defined(DEBUG_SIGNAL)
860
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
861
           pc, address, is_write, *(unsigned long *)old_set);
862
#endif
863
    /* XXX: locking issue */
864
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
865
        return 1;
866
    }
867
    /* see if it is an MMU fault */
868
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
869
    if (ret < 0)
870
        return 0; /* not an MMU fault */
871
    if (ret == 0)
872
        return 1; /* the MMU fault was handled without causing real CPU fault */
873
    /* now we have a real cpu fault */
874
    tb = tb_find_pc(pc);
875
    if (tb) {
876
        /* the PC is inside the translated code. It means that we have
877
           a virtual CPU fault */
878
        cpu_restore_state(tb, env, pc, puc);
879
    }
880
    /* we restore the process signal mask as the sigreturn should
881
       do it (XXX: use sigsetjmp) */
882
    sigprocmask(SIG_SETMASK, old_set, NULL);
883
    cpu_loop_exit();
884
    /* never comes here */
885
    return 1;
886
}
887
#elif defined (TARGET_PPC)
888
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
889
                                    int is_write, sigset_t *old_set,
890
                                    void *puc)
891
{
892
    TranslationBlock *tb;
893
    int ret;
894

    
895
    if (cpu_single_env)
896
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
897
#if defined(DEBUG_SIGNAL)
898
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
899
           pc, address, is_write, *(unsigned long *)old_set);
900
#endif
901
    /* XXX: locking issue */
902
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
903
        return 1;
904
    }
905

    
906
    /* see if it is an MMU fault */
907
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
908
    if (ret < 0)
909
        return 0; /* not an MMU fault */
910
    if (ret == 0)
911
        return 1; /* the MMU fault was handled without causing real CPU fault */
912

    
913
    /* now we have a real cpu fault */
914
    tb = tb_find_pc(pc);
915
    if (tb) {
916
        /* the PC is inside the translated code. It means that we have
917
           a virtual CPU fault */
918
        cpu_restore_state(tb, env, pc, puc);
919
    }
920
    if (ret == 1) {
921
#if 0
922
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
923
               env->nip, env->error_code, tb);
924
#endif
925
    /* we restore the process signal mask as the sigreturn should
926
       do it (XXX: use sigsetjmp) */
927
        sigprocmask(SIG_SETMASK, old_set, NULL);
928
        cpu_loop_exit();
929
    } else {
930
        /* activate soft MMU for this block */
931
        cpu_resume_from_signal(env, puc);
932
    }
933
    /* never comes here */
934
    return 1;
935
}
936

    
937
#elif defined(TARGET_M68K)
938
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
939
                                    int is_write, sigset_t *old_set,
940
                                    void *puc)
941
{
942
    TranslationBlock *tb;
943
    int ret;
944

    
945
    if (cpu_single_env)
946
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
947
#if defined(DEBUG_SIGNAL)
948
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
949
           pc, address, is_write, *(unsigned long *)old_set);
950
#endif
951
    /* XXX: locking issue */
952
    if (is_write && page_unprotect(address, pc, puc)) {
953
        return 1;
954
    }
955
    /* see if it is an MMU fault */
956
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
957
    if (ret < 0)
958
        return 0; /* not an MMU fault */
959
    if (ret == 0)
960
        return 1; /* the MMU fault was handled without causing real CPU fault */
961
    /* now we have a real cpu fault */
962
    tb = tb_find_pc(pc);
963
    if (tb) {
964
        /* the PC is inside the translated code. It means that we have
965
           a virtual CPU fault */
966
        cpu_restore_state(tb, env, pc, puc);
967
    }
968
    /* we restore the process signal mask as the sigreturn should
969
       do it (XXX: use sigsetjmp) */
970
    sigprocmask(SIG_SETMASK, old_set, NULL);
971
    cpu_loop_exit();
972
    /* never comes here */
973
    return 1;
974
}
975

    
976
#elif defined (TARGET_MIPS)
977
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
978
                                    int is_write, sigset_t *old_set,
979
                                    void *puc)
980
{
981
    TranslationBlock *tb;
982
    int ret;
983

    
984
    if (cpu_single_env)
985
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
986
#if defined(DEBUG_SIGNAL)
987
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
988
           pc, address, is_write, *(unsigned long *)old_set);
989
#endif
990
    /* XXX: locking issue */
991
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
992
        return 1;
993
    }
994

    
995
    /* see if it is an MMU fault */
996
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
997
    if (ret < 0)
998
        return 0; /* not an MMU fault */
999
    if (ret == 0)
1000
        return 1; /* the MMU fault was handled without causing real CPU fault */
1001

    
1002
    /* now we have a real cpu fault */
1003
    tb = tb_find_pc(pc);
1004
    if (tb) {
1005
        /* the PC is inside the translated code. It means that we have
1006
           a virtual CPU fault */
1007
        cpu_restore_state(tb, env, pc, puc);
1008
    }
1009
    if (ret == 1) {
1010
#if 0
1011
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
1012
               env->PC, env->error_code, tb);
1013
#endif
1014
    /* we restore the process signal mask as the sigreturn should
1015
       do it (XXX: use sigsetjmp) */
1016
        sigprocmask(SIG_SETMASK, old_set, NULL);
1017
        cpu_loop_exit();
1018
    } else {
1019
        /* activate soft MMU for this block */
1020
        cpu_resume_from_signal(env, puc);
1021
    }
1022
    /* never comes here */
1023
    return 1;
1024
}
1025

    
1026
#elif defined (TARGET_SH4)
1027
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1028
                                    int is_write, sigset_t *old_set,
1029
                                    void *puc)
1030
{
1031
    TranslationBlock *tb;
1032
    int ret;
1033

    
1034
    if (cpu_single_env)
1035
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
1036
#if defined(DEBUG_SIGNAL)
1037
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1038
           pc, address, is_write, *(unsigned long *)old_set);
1039
#endif
1040
    /* XXX: locking issue */
1041
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
1042
        return 1;
1043
    }
1044

    
1045
    /* see if it is an MMU fault */
1046
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1047
    if (ret < 0)
1048
        return 0; /* not an MMU fault */
1049
    if (ret == 0)
1050
        return 1; /* the MMU fault was handled without causing real CPU fault */
1051

    
1052
    /* now we have a real cpu fault */
1053
    tb = tb_find_pc(pc);
1054
    if (tb) {
1055
        /* the PC is inside the translated code. It means that we have
1056
           a virtual CPU fault */
1057
        cpu_restore_state(tb, env, pc, puc);
1058
    }
1059
#if 0
1060
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1061
               env->nip, env->error_code, tb);
1062
#endif
1063
    /* we restore the process signal mask as the sigreturn should
1064
       do it (XXX: use sigsetjmp) */
1065
    sigprocmask(SIG_SETMASK, old_set, NULL);
1066
    cpu_loop_exit();
1067
    /* never comes here */
1068
    return 1;
1069
}
1070

    
1071
#elif defined (TARGET_ALPHA)
1072
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1073
                                    int is_write, sigset_t *old_set,
1074
                                    void *puc)
1075
{
1076
    TranslationBlock *tb;
1077
    int ret;
1078

    
1079
    if (cpu_single_env)
1080
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
1081
#if defined(DEBUG_SIGNAL)
1082
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1083
           pc, address, is_write, *(unsigned long *)old_set);
1084
#endif
1085
    /* XXX: locking issue */
1086
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
1087
        return 1;
1088
    }
1089

    
1090
    /* see if it is an MMU fault */
1091
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1092
    if (ret < 0)
1093
        return 0; /* not an MMU fault */
1094
    if (ret == 0)
1095
        return 1; /* the MMU fault was handled without causing real CPU fault */
1096

    
1097
    /* now we have a real cpu fault */
1098
    tb = tb_find_pc(pc);
1099
    if (tb) {
1100
        /* the PC is inside the translated code. It means that we have
1101
           a virtual CPU fault */
1102
        cpu_restore_state(tb, env, pc, puc);
1103
    }
1104
#if 0
1105
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1106
               env->nip, env->error_code, tb);
1107
#endif
1108
    /* we restore the process signal mask as the sigreturn should
1109
       do it (XXX: use sigsetjmp) */
1110
    sigprocmask(SIG_SETMASK, old_set, NULL);
1111
    cpu_loop_exit();
1112
    /* never comes here */
1113
    return 1;
1114
}
1115
#elif defined (TARGET_CRIS)
1116
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1117
                                    int is_write, sigset_t *old_set,
1118
                                    void *puc)
1119
{
1120
    TranslationBlock *tb;
1121
    int ret;
1122

    
1123
    if (cpu_single_env)
1124
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
1125
#if defined(DEBUG_SIGNAL)
1126
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1127
           pc, address, is_write, *(unsigned long *)old_set);
1128
#endif
1129
    /* XXX: locking issue */
1130
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
1131
        return 1;
1132
    }
1133

    
1134
    /* see if it is an MMU fault */
1135
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1136
    if (ret < 0)
1137
        return 0; /* not an MMU fault */
1138
    if (ret == 0)
1139
        return 1; /* the MMU fault was handled without causing real CPU fault */
1140

    
1141
    /* now we have a real cpu fault */
1142
    tb = tb_find_pc(pc);
1143
    if (tb) {
1144
        /* the PC is inside the translated code. It means that we have
1145
           a virtual CPU fault */
1146
        cpu_restore_state(tb, env, pc, puc);
1147
    }
1148
    /* we restore the process signal mask as the sigreturn should
1149
       do it (XXX: use sigsetjmp) */
1150
    sigprocmask(SIG_SETMASK, old_set, NULL);
1151
    cpu_loop_exit();
1152
    /* never comes here */
1153
    return 1;
1154
}
1155

    
1156
#else
1157
#error unsupported target CPU
1158
#endif
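
/* The cpu_signal_handler() implementations below run on the host CPU:
   each one extracts the faulting program counter and, where the host
   reports it, a write-access flag from the host-specific signal context,
   then hands off to handle_cpu_signal() above. */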

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    /* trap 0xe is a page fault; bit 1 of its error code is the write bit */
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)                REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                 REG_sig(link, context)  /* Link register */
# define CR_sig(context)                 REG_sig(ccr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     (((double *)((char *)((context)->uc_mcontext.regs + 48 * 4)))[reg_num])
# define FPSCR_sig(context)              (*(int *)((char *)((context)->uc_mcontext.regs + (48 + 32 * 2) * 4)))
/* Exception Registers access */
# define DAR_sig(context)                REG_sig(dar, context)
# define DSISR_sig(context)              REG_sig(dsisr, context)
# define TRAP_sig(context)               REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch ((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID        1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */