/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}
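
/* Abort execution of the current TB and longjmp back to the setjmp
   point in cpu_exec(). */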
void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* Exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator. */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
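
/* Second-level TB lookup: search the physical-address hash table,
   matching pc, cs_base and flags and, for a TB that spans two pages,
   the physical address of the second page. On a miss, translate a new
   TB. The result is cached in the virtual-pc jump cache. */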
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code is available, translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* add the TB to the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
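
/* Called on EXCP_DEBUG: clear stale watchpoint-hit flags (unless we
   actually stopped on a watchpoint), then hand the exception to the
   registered debug exception handler, if any. */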
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        QTAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                    env->exception_index = -1;
#endif
                }
            }

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
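
            /* next_tb carries the host address of the last executed TB
               with the index of the jump slot it exited through in its
               low two bits (the return value of tcg_qemu_tb_exec); a
               value of zero forces a fresh lookup and suppresses TB
               chaining. */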
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                            svm_check_intercept(SVM_EXIT_INIT);
                            do_cpu_init(env);
                            env->exception_index = EXCP_HALTED;
                            cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                            do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
413
#if 0
414
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
415
                        cpu_reset(env);
416
                    }
417
#endif
418
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
419
                        ppc_hw_interrupt(env);
420
                        if (env->pending_interrupts == 0)
421
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
422
                        next_tb = 0;
423
                    }
424
#elif defined(TARGET_MICROBLAZE)
425
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
426
                        && (env->sregs[SR_MSR] & MSR_IE)
427
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
428
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
429
                        env->exception_index = EXCP_IRQ;
430
                        do_interrupt(env);
431
                        next_tb = 0;
432
                    }
433
#elif defined(TARGET_MIPS)
434
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
435
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
436
                        (env->CP0_Status & (1 << CP0St_IE)) &&
437
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
438
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
439
                        !(env->hflags & MIPS_HFLAG_DM)) {
440
                        /* Raise it */
441
                        env->exception_index = EXCP_EXT_INTERRUPT;
442
                        env->error_code = 0;
443
                        do_interrupt(env);
444
                        next_tb = 0;
445
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef CONFIG_DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (!unlikely (env->exit_request)) {
                    env->current_tb = tb;
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it leads to an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif
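
/* On i386 the memory fault is re-delivered to the guest as a CPU
   exception; other targets simply unwind back to the main loop. */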

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif
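
/* On x86 hosts, trap number 0xe is the page-fault vector; bit 1 of the
   page-fault error code is set when the faulting access was a write. */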
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

    
878
int cpu_signal_handler(int host_signum, void *pinfo,
879
                       void *puc)
880
{
881
    siginfo_t *info = pinfo;
882
    unsigned long pc;
883
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
884
    ucontext_t *uc = puc;
885
#elif defined(__OpenBSD__)
886
    struct sigcontext *uc = puc;
887
#else
888
    struct ucontext *uc = puc;
889
#endif
890

    
891
    pc = PC_sig(uc);
892
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
893
                             TRAP_sig(uc) == 0xe ?
894
                             (ERROR_sig(uc) >> 1) & 1 : 0,
895
                             &MASK_sig(uc), puc);
896
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)       REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)                REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                 REG_sig(link, context)  /* Link register */
# define CR_sig(context)                 REG_sig(ccr, context)   /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)     (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)              (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                REG_sig(dar, context)
# define DSISR_sig(context)              REG_sig(dsisr, context)
# define TRAP_sig(context)               REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)
# define XER_sig(context)                REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)
1002

    
1003
int cpu_signal_handler(int host_signum, void *pinfo,
1004
                       void *puc)
1005
{
1006
    siginfo_t *info = pinfo;
1007
    int is_write;
1008
    uint32_t insn;
1009
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
1010
    uint32_t *regs = (uint32_t *)(info + 1);
1011
    void *sigmask = (regs + 20);
1012
    /* XXX: is there a standard glibc define ? */
1013
    unsigned long pc = regs[1];
1014
#else
1015
#ifdef __linux__
1016
    struct sigcontext *sc = puc;
1017
    unsigned long pc = sc->sigc_regs.tpc;
1018
    void *sigmask = (void *)sc->sigc_mask;
1019
#elif defined(__OpenBSD__)
1020
    struct sigcontext *uc = puc;
1021
    unsigned long pc = uc->sc_pc;
1022
    void *sigmask = (void *)(long)uc->sc_mask;
1023
#endif
1024
#endif
1025

    
1026
    /* XXX: need kernel patch to get write flag faster */
1027
    is_write = 0;
1028
    insn = *(uint32_t *)pc;
1029
    if ((insn >> 30) == 3) {
1030
      switch((insn >> 19) & 0x3f) {
1031
      case 0x05: // stb
1032
      case 0x15: // stba
1033
      case 0x06: // sth
1034
      case 0x16: // stha
1035
      case 0x04: // st
1036
      case 0x14: // sta
1037
      case 0x07: // std
1038
      case 0x17: // stda
1039
      case 0x0e: // stx
1040
      case 0x1e: // stxa
1041
      case 0x24: // stf
1042
      case 0x34: // stfa
1043
      case 0x27: // stdf
1044
      case 0x37: // stdfa
1045
      case 0x26: // stqf
1046
      case 0x36: // stqfa
1047
      case 0x25: // stfsr
1048
      case 0x3c: // casa
1049
      case 0x3e: // casxa
1050
        is_write = 1;
1051
        break;
1052
      }
1053
    }
1054
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1055
                             is_write, sigmask, NULL);
1056
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID        1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
      case SIGILL:
      case SIGFPE:
      case SIGSEGV:
      case SIGBUS:
      case SIGTRAP:
          if (info->si_code && (info->si_segvflags & __ISR_VALID))
              /* ISR.W (write-access) is bit 33:  */
              is_write = (info->si_isr >> 33) & 1;
          break;

      default:
          break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */