/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
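
/* Editor's note: an illustrative sketch, not part of the original file.
   tcg_qemu_tb_exec() returns the address of the last executed TB with a
   tag packed into its two low bits: 0 or 1 name the jump slot that may be
   chained later via tb_add_jump(), while 2 means the instruction counter
   expired.  A decoder for that convention would look like this: */
static inline TranslationBlock *decode_next_tb(unsigned long next_tb,
                                               int *tag)
{
    *tag = next_tb & 3;                        /* 0/1: jump slot, 2: icount */
    return (TranslationBlock *)(next_tb & ~3); /* untagged TB pointer */
}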

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code is available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB to the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
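
/* Editor's note: an illustrative sketch, not part of the original file.
   TB lookup is two-level: tb_find_fast() probes tb_jmp_cache, a
   direct-mapped table indexed by a hash of the virtual PC, and falls back
   to tb_find_slow(), which walks the physical-address hash chain and
   translates a new block on a complete miss.  The execution loop below
   uses it under tb_lock:

       spin_lock(&tb_lock);
       tb = tb_find_fast();    // cache hit, chain walk, or fresh translation
       ...
       spin_unlock(&tb_lock);
*/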

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
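    /* Editor's note: an illustrative sketch, not part of the original
       file.  The arithmetic flags are kept "lazy" while translated code
       runs, and DF is stored as +1/-1 rather than as eflags bit 10 so
       string instructions can advance pointers with a plain multiply:

           edi += DF * size;   // size = 1, 2 or 4

       The architectural eflags value is rebuilt on exit from cpu_exec()
       via helper_cc_compute_all(CC_OP) | (DF & DF_MASK).  */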
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef CONFIG_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
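                    /* Editor's note: an illustrative sketch, not part of
                       the original file.  For v7-M the magic return
                       addresses live at the very top of the address
                       space, so the guard above amounts to:

                           int at_magic_pc = IS_M(env)
                               && env->regs[15] >= 0xfffffff0;
                           // take IRQs only while !at_magic_pc
                    */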
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef CONFIG_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
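                /* Editor's note: an illustrative sketch, not part of the
                   original file.  Chaining rewrites one of the two jump
                   slots of the previously executed TB so it branches
                   straight into this one, bypassing the lookup above:

                       prev_tb = (TranslationBlock *)(next_tb & ~3);
                       tb_add_jump(prev_tb, next_tb & 3, tb);

                   next_tb is forced to 0 wherever control flow changed
                   (interrupt delivery, TB invalidation) so a stale patch
                   is never installed.  */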
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely(env->exit_request))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
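                /* Editor's note: an illustrative sketch, not part of the
                   original file.  The icount budget is split between a
                   16-bit decrementer that generated code counts down and
                   an overflow pool, env->icount_extra.  The refill above
                   is effectively:

                       budget = env->icount_extra + insns_left;
                       slice  = budget > 0xffff ? 0xffff : budget;
                       env->icount_extra        = budget - slice;
                       env->icount_decr.u16.low = slice;

                   Once both are exhausted the loop raises EXCP_INTERRUPT
                   so pending timers can run.  */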
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(CONFIG_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it triggers an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
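
/* Editor's note: an illustrative sketch, not part of the original file.
   Every per-target handle_cpu_signal() here follows the same contract:

       return 0;   // not an MMU fault: let the host raise the signal
       return 1;   // handled transparently: restart the faulting insn

   A fault the guest must observe never returns here at all: it
   longjmp()s back into cpu_exec() through raise_exception_err(),
   cpu_loop_exit() or cpu_resume_from_signal().  */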

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
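
/* Editor's note: an illustrative sketch, not part of the original file.
   Trap 0xe is the x86 page fault; bit 1 of its error code is the W flag,
   set when the faulting access was a write.  Hence the is_write argument
   computed above:

       is_write = (trapno == 0xe) ? (ERROR_sig(uc) >> 1) & 1 : 0;
*/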

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context)  /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)   /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)  ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID        1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */