/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}
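
/* cpu_loop_exit() unwinds back to the setjmp() in cpu_exec() below. A
 * minimal, self-contained sketch of the same pattern (all demo_* names
 * are illustrative, not part of this file's API):
 */
#if 0
#include <setjmp.h>
#include <stdio.h>

static jmp_buf demo_jmp_env;

static void demo_raise(void)
{
    longjmp(demo_jmp_env, 1); /* unwind straight back to setjmp() */
}

static void demo_loop(void)
{
    if (setjmp(demo_jmp_env) == 0) {
        printf("executing...\n");
        demo_raise();                        /* never returns */
    } else {
        printf("resumed after exception\n"); /* longjmp lands here */
    }
}
#endif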

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp? */
#ifdef __linux__
#ifdef __ia64
        sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
#else
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#endif
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
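
/* tcg_qemu_tb_exec() returns the address of the last executed TB with an
 * exit reason encoded in its low two bits (2 means the instruction-count
 * decrementer expired, as tested above). A minimal sketch of this kind of
 * pointer tagging, independent of the real TCG encoding (demo_* names and
 * TB_EXIT_MASK are illustrative):
 */
#if 0
#include <stdint.h>

#define TB_EXIT_MASK 3UL

static uintptr_t demo_tag(void *tb_ptr, unsigned reason)
{
    /* TB pointers are at least 4-byte aligned, so the low bits are free */
    return (uintptr_t)tb_ptr | (reason & TB_EXIT_MASK);
}

static void demo_untag(uintptr_t next_tb, void **tb_ptr, unsigned *reason)
{
    *tb_ptr = (void *)(next_tb & ~TB_EXIT_MASK);
    *reason = next_tb & TB_EXIT_MASK;
}
#endif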

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
   /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
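
/* TB lookup is two-level: a direct-mapped cache indexed by virtual PC
 * (tb_jmp_cache, the hit path in tb_find_fast) backed by chained hash
 * buckets keyed on the physical PC (tb_phys_hash, the slow path above).
 * A minimal sketch of the same structure with illustrative names:
 */
#if 0
#include <stddef.h>
#include <stdint.h>

struct demo_tb { uint32_t pc; struct demo_tb *hash_next; };

#define DEMO_CACHE_SIZE 4096 /* power of two, like TB_JMP_CACHE_SIZE */

static struct demo_tb *demo_cache[DEMO_CACHE_SIZE]; /* direct-mapped */
static struct demo_tb *demo_hash[DEMO_CACHE_SIZE];  /* chained buckets */

static struct demo_tb *demo_find(uint32_t pc)
{
    struct demo_tb *tb = demo_cache[pc & (DEMO_CACHE_SIZE - 1)];
    if (tb && tb->pc == pc)
        return tb;                  /* fast path: one load, one compare */
    for (tb = demo_hash[pc & (DEMO_CACHE_SIZE - 1)]; tb; tb = tb->hash_next) {
        if (tb->pc == pc) {
            demo_cache[pc & (DEMO_CACHE_SIZE - 1)] = tb; /* refill cache */
            return tb;
        }
    }
    return NULL;                    /* caller would translate here */
}
#endif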

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
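
/* cpu_set_debug_excp_handler() follows the usual "swap and return the old
 * handler" convention, so callers can chain handlers. A hedged usage
 * sketch (the demo_* names are illustrative):
 */
#if 0
static CPUDebugExcpHandler *demo_prev_handler;

static void demo_debug_excp(CPUState *env)
{
    /* ... inspect env->watchpoint_hit, breakpoints, etc. ... */
    if (demo_prev_handler)
        demo_prev_handler(env); /* keep the previous handler working */
}

static void demo_install(void)
{
    demo_prev_handler = cpu_set_debug_excp_handler(demo_debug_excp);
}
#endif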

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        QTAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

volatile sig_atomic_t exit_request;

int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it.  */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    asm("");
    env = env1;

    if (exit_request) {
        env->exit_request = 1;
        exit_request = 0;
    }

#if defined(TARGET_I386)
    if (!kvm_enabled()) {
        /* put eflags in CPU temporary format */
        CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
        DF = 1 - (2 * ((env->eflags >> 10) & 1));
        CC_OP = CC_OP_EFLAGS;
        env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    }
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                    env->exception_index = -1;
#endif
                }
            }

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                            svm_check_intercept(SVM_EXIT_INIT);
                            do_cpu_init(env);
                            env->exception_index = EXCP_HALTED;
                            cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                            do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (!unlikely (env->exit_request)) {
                    env->current_tb = tb;
                    tc_ptr = tb->tc_ptr;
                /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    asm("");
    env = (void *) saved_env_reg;

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
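
/* The icount path above runs TBs against a 16-bit decrementer
 * (icount_decr.u16.low) refilled from icount_extra in chunks of at most
 * 0xffff instructions. A standalone sketch of the same refill arithmetic
 * (the demo_* names are illustrative):
 */
#if 0
#include <stdint.h>

static void demo_icount_refill(int insns_left, int64_t *icount_extra,
                               uint16_t *decr_low)
{
    /* fold any unused budget back, then hand out at most 0xffff insns */
    *icount_extra += insns_left;
    if (*icount_extra > 0xffff) {
        insns_left = 0xffff;
    } else {
        insns_left = *icount_extra;
    }
    *icount_extra -= insns_left;
    *decr_low = insns_left;
}
#endif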

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}
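
/* In real and VM86 mode (the first branch above) a segment has no
 * descriptor: its base is simply selector << 4 and its limit is 0xffff,
 * so a linear address is base + offset. A small worked example:
 */
#if 0
#include <assert.h>
#include <stdint.h>

static void demo_real_mode_addressing(void)
{
    uint16_t selector = 0xb800;         /* classic VGA text segment */
    uint32_t base = (uint32_t)selector << 4;
    assert(base == 0xb8000);            /* 0xb800:0000 -> linear 0xb8000 */
    assert(base + 0xffff == 0xc7fff);   /* highest address in the segment */
}
#endif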

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}
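
/* The cpu_signal_handler() variants below are meant to be installed with
 * sigaction() and SA_SIGINFO so they receive the siginfo_t and ucontext
 * arguments they unpack. A hedged sketch of such an installation (the
 * demo_* wrapper names are illustrative; QEMU's actual signal setup lives
 * elsewhere):
 */
#if 0
#include <signal.h>
#include <stdlib.h>

static void demo_sigsegv(int signum, siginfo_t *info, void *puc)
{
    if (!cpu_signal_handler(signum, info, puc))
        abort(); /* not a guest fault we could handle */
}

static void demo_install_handler(void)
{
    struct sigaction act;
    sigemptyset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = demo_sigsegv;
    sigaction(SIGSEGV, &act, NULL);
}
#endif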

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
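
/* Trap number 0xe is the x86 page fault; bit 1 of the error code pushed by
 * the CPU distinguishes writes from reads, which is exactly what
 * (ERROR_sig(uc) >> 1) & 1 extracts above. A tiny worked example:
 */
#if 0
#include <assert.h>

static void demo_pf_error_code(void)
{
    /* bit 0 = protection violation, bit 1 = write, bit 2 = user mode */
    unsigned long err = 0x2;        /* write access, page not present */
    assert(((err >> 1) & 1) == 1);  /* decoded as is_write = 1 */
    err = 0x0;                      /* read access, page not present */
    assert(((err >> 1) & 1) == 0);  /* decoded as is_write = 0 */
}
#endif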

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)             ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)              REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                       REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)                       REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)                       REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                       REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                        REG_sig(link, context)  /* Link register */
# define CR_sig(context)                        REG_sig(ccr, context)   /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)            (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)                     (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                       REG_sig(dar, context)
# define DSISR_sig(context)                     REG_sig(dsisr, context)
# define TRAP_sig(context)                      REG_sig(trap, context)
#endif /* linux */

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <ucontext.h>
# define IAR_sig(context)               ((context)->uc_mcontext.mc_srr0)
# define MSR_sig(context)               ((context)->uc_mcontext.mc_srr1)
# define CTR_sig(context)               ((context)->uc_mcontext.mc_ctr)
# define XER_sig(context)               ((context)->uc_mcontext.mc_xer)
# define LR_sig(context)                ((context)->uc_mcontext.mc_lr)
# define CR_sig(context)                ((context)->uc_mcontext.mc_cr)
/* Exception Registers access */
# define DAR_sig(context)               ((context)->uc_mcontext.mc_dar)
# define DSISR_sig(context)             ((context)->uc_mcontext.mc_dsisr)
# define TRAP_sig(context)              ((context)->uc_mcontext.mc_exc)
#endif /* __FreeBSD__|| __FreeBSD_kernel__ */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)             ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)        ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)        ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)          ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)              REG_sig(r##reg_num, context)
# define IAR_sig(context)                       REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                       REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                       REG_sig(ctr, context)
# define XER_sig(context)                       REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                        REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                        REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)            FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                     ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                       EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)                     EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                      EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                           void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
      switch((insn >> 19) & 0x3f) {
      case 0x05: // stb
      case 0x15: // stba
      case 0x06: // sth
      case 0x16: // stha
      case 0x04: // st
      case 0x14: // sta
      case 0x07: // std
      case 0x17: // stda
      case 0x0e: // stx
      case 0x1e: // stxa
      case 0x24: // stf
      case 0x34: // stfa
      case 0x27: // stdf
      case 0x37: // stdfa
      case 0x26: // stqf
      case 0x36: // stqfa
      case 0x25: // stfsr
      case 0x3c: // casa
      case 0x3e: // casxa
        is_write = 1;
        break;
      }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID        1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
      case SIGILL:
      case SIGFPE:
      case SIGSEGV:
      case SIGBUS:
      case SIGTRAP:
          if (info->si_code && (info->si_segvflags & __ISR_VALID))
              /* ISR.W (write-access) is bit 33:  */
              is_write = (info->si_isr >> 33) & 1;
          break;

      default:
          break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             (sigset_t *)&uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc = uc->uc_mcontext.sc_iaoq[0];
    uint32_t insn = *(uint32_t *)pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster.  */
    switch (insn >> 26) {
    case 0x1a: /* STW */
    case 0x19: /* STH */
    case 0x18: /* STB */
    case 0x1b: /* STWM */
        is_write = 1;
        break;

    case 0x09: /* CSTWX, FSTWX, FSTWS */
    case 0x0b: /* CSTDX, FSTDX, FSTDS */
        /* Distinguish from coprocessor load ... */
        is_write = (insn >> 9) & 1;
        break;

    case 0x03:
        switch ((insn >> 6) & 15) {
        case 0xa: /* STWS */
        case 0x9: /* STHS */
        case 0x8: /* STBS */
        case 0xe: /* STWAS */
        case 0xc: /* STBYS */
            is_write = 1;
        }
        break;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */