/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

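/* Set when translated blocks have been invalidated while a new TB was
   being generated; cpu_exec() then avoids chaining a direct jump into a
   possibly stale TB (see the tb_invalidated_flag check below). */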
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

void cpu_loop_exit(void)
{
    /* NOTE: the CPU state cached in host registers must be written back
       by hand here, because longjmp() restores the host registers to
       their setjmp()-time values */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

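/* Slow-path TB lookup: walk the physical-PC hash chain comparing pc,
   cs_base, flags and, for TBs spanning a page boundary, the second
   physical page; translate the code if no matching TB exists yet. */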
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code is available, translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* add the TB to the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

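/* Fast-path TB lookup: probe the per-CPU tb_jmp_cache with the current
   virtual PC, falling back to tb_find_slow() on a miss. */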
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

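/* Handle EXCP_DEBUG: if the stop was not caused by a watchpoint hit,
   clear any stale BP_WATCHPOINT_HIT flags, then invoke the registered
   debug exception handler, if any. */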
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

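    /* Exceptions raised while executing translated code longjmp() back
       to the setjmp() below; the normal (zero) path delivers any pending
       exception before re-entering the translated-code loop. */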
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

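            /* Inner loop: poll interrupt_request, look up (or translate)
               the next TB, chain it to the previous one when safe, and
               execute it until something forces an exit back to the
               setjmp() handler above. */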
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TBs could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
                    env->current_tb = NULL;

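                /* tcg_qemu_tb_exec() returns the address of the TB we
                   exited from, with the low two bits encoding the exit:
                   0/1 select the direct-jump slot used for chaining,
                   2 means the instruction counter expired. */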
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet: on PowerPC it triggers an MMU exception
       where NIP != the faulting read address */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

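/* User-mode-only (no soft MMU) fault handling: the target-specific
   handle_cpu_signal() variants below are called from the host signal
   handlers when a guest memory access faults. */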
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception, 0 otherwise. 'old_set' is the signal set
   which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never reached */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never reached */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never reached */
    return 1;
}
#elif defined(TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env, env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never reached */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never reached */
    return 1;
}

#elif defined(TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never reached */
    return 1;
}

#elif defined(TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never reached */
    return 1;
}

#elif defined(TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never reached */
    return 1;
}
#elif defined(TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never reached */
    return 1;
}

#else
#error unsupported target CPU
#endif

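/* Host-side handlers: each cpu_signal_handler() variant below extracts
   the faulting host PC and, where the host reports one, a write-access
   flag from the platform signal context, then forwards them to
   handle_cpu_signal(). */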
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context)  /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)   /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch ((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
      case SIGILL:
      case SIGFPE:
      case SIGSEGV:
      case SIGBUS:
      case SIGTRAP:
          if (info->si_code && (info->si_segvflags & __ISR_VALID))
              /* ISR.W (write-access) is bit 33:  */
              is_write = (info->si_isr >> 33) & 1;
          break;

      default:
          break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */