/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#include <assert.h>

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

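/* Set by tb_gen_code() when the translation buffer has been flushed while
   generating a new TB; cpu_exec() checks it and drops any cached TB
   chaining pointer that may have become stale. */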
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;

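    /* The low two bits of the value returned by tcg_qemu_tb_exec() encode
       why the generated code stopped; 2 means the TB was exited before its
       body ran, so the guest PC must be restored from the TB itself. */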
    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

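/* Look a TB up in the physically indexed hash table, matching the full
   (pc, cs_base, flags) key and, for a TB spanning two pages, the physical
   address of the second page as well; translate a new TB on a miss.  The
   result is also written to the virtually indexed tb_jmp_cache that
   tb_find_fast() consults first. */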
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        QTAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

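/* The outer for(;;) is re-entered through setjmp()/longjmp() whenever an
   exception or cpu_loop_exit() unwinds out of generated code.  The inner
   for(;;) services pending interrupts, finds (or translates) the TB for
   the current CPU state, chains it to the previous TB when that is safe,
   and executes it. */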
int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            assert (env->current_tb == NULL);
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
#endif
                }
                env->exception_index = -1;
            }

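            /* With KVM, guest code is not translated: run the vcpu in the
               kernel and re-enter the setjmp loop so that any resulting
               exception or exit request is handled above. */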
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value;
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef CONFIG_DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (!unlikely (env->exit_request)) {
                    env->current_tb = tb;
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
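                        /* The decrementer is only 16 bits wide; whatever
                           does not fit is parked in icount_extra and moved
                           into the decrementer in chunks of at most
                           0xffff instructions. */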
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                assert (env->current_tb == NULL);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot be enabled yet: it can trigger an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

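/* In real mode or vm86 mode there are no descriptor tables: the segment
   base is simply selector << 4.  In protected mode the descriptor must be
   loaded and checked, which helper_load_seg() does (and which may fault). */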
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

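/* On i386 a memory fault must be re-raised as a guest exception so that
   its error code is delivered; other targets only need to leave the cpu
   loop with env->exception_index already set. */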
#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception, 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

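/* Decode the host fault context: for x86 page faults (trap number 0xe),
   bit 1 of the error code is set when the faulting access was a write. */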
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)                ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)                REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                        REG_sig(nip, context)        /* Program counter */
# define MSR_sig(context)                        REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)                        REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                        REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context)                        REG_sig(link, context) /* Link register */
# define CR_sig(context)                        REG_sig(ccr, context) /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)                (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)                        (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                        REG_sig(dar, context)
# define DSISR_sig(context)                        REG_sig(dsisr, context)
# define TRAP_sig(context)                        REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)                ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)        ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)        ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)                ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)                REG_sig(r##reg_num, context)
# define IAR_sig(context)                        REG_sig(srr0, context)        /* Program counter */
# define MSR_sig(context)                        REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                        REG_sig(ctr, context)
# define XER_sig(context)                        REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context)                        REG_sig(lr, context)  /* Link register */
# define CR_sig(context)                        REG_sig(cr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)                FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                        ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                        EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)                        EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                        EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
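    /* Recover the write flag by decoding the faulting instruction: for a
       format-3 instruction (top two bits 0b11), the op3 field in bits
       24:19 identifies the store opcodes listed below. */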
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
      switch((insn >> 19) & 0x3f) {
      case 0x05: // stb
      case 0x15: // stba
      case 0x06: // sth
      case 0x16: // stha
      case 0x04: // st
      case 0x14: // sta
      case 0x07: // std
      case 0x17: // stda
      case 0x0e: // stx
      case 0x1e: // stxa
      case 0x24: // stf
      case 0x34: // stfa
      case 0x27: // stdf
      case 0x37: // stdfa
      case 0x26: // stqf
      case 0x36: // stqfa
      case 0x25: // stfsr
      case 0x3c: // casa
      case 0x3e: // casxa
        is_write = 1;
        break;
      }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID        1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
      case SIGILL:
      case SIGFPE:
      case SIGSEGV:
      case SIGBUS:
      case SIGTRAP:
          if (info->si_code && (info->si_segvflags & __ISR_VALID))
              /* ISR.W (write-access) is bit 33:  */
              is_write = (info->si_isr >> 33) & 1;
          break;

      default:
          break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */