/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

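/* Set when a TB has been invalidated (or the whole TB cache flushed)
   while the next TB was being looked up or generated; the main loop
   then avoids chaining to a possibly stale block. */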
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    /* NOTE: the registers must be saved by hand at this point because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

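/* Fast-path lookup: tb_jmp_cache is a direct-mapped cache indexed by a
   hash of the virtual PC.  On a miss, or if the cached TB no longer
   matches the current CPU state, fall back to tb_find_slow(), which
   walks the physical-address hash chain. */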
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        QTAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

int cpu_exec(CPUState *env1)
{
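/* hostregs_helper.h is included three times in this function: with
   DECLARE_HOST_REGS to declare saved copies of the host's global registers,
   with SAVE_HOST_REGS to fill them, and bare (at the end of cpu_exec) to
   restore them */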
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
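    /* DF is kept as the +1/-1 increment used by the string instructions,
       derived from eflags bit 10 (the direction flag) */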
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
#endif
                }
                env->exception_index = -1;
            }

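            /* with KVM the guest runs inside the kernel and TCG blocks are
               never executed here: do one KVM run, then jump back to the
               setjmp above to re-check pending exceptions and exit requests */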
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

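            /* next_tb carries the address of the last executed TB, with its
               low two bits encoding which jump slot was used to leave it;
               this lets the new TB be chained to its caller below.  0 means
               "do not chain". */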
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                            svm_check_intercept(SVM_EXIT_INIT);
                            do_cpu_init(env);
                            env->exception_index = EXCP_HALTED;
                            cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                            do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
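                        /* while GIF (the SVM global interrupt flag) is clear,
                           SMI, NMI and hardware interrupts are all held off */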
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                   /* Don't use the cached interrupt_request value,
                      do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef CONFIG_DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
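                    /* a low-bits value of 2 marks a TB exit taken because
                       the instruction counter (icount) ran out */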
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

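/* on i386 a real guest fault is re-raised with its error code so the guest
   sees the exception; other targets have already set exception_index and
   simply leave the CPU loop */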
#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
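    /* a write to a page that was write-protected because it contains
       translated code: unprotect it and invalidate its TBs, then retry */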
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)        ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)         REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                  REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)                  REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)                  REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                  REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                   REG_sig(link, context)  /* Link register */
# define CR_sig(context)                   REG_sig(ccr, context)   /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)       (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)                (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                  REG_sig(dar, context)
# define DSISR_sig(context)                REG_sig(dsisr, context)
# define TRAP_sig(context)                 REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)        ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)   ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)   ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)     ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)         REG_sig(r##reg_num, context)
# define IAR_sig(context)                  REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                  REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                  REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                  REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                   REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                   REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)       FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                  EXCEPREG_sig(dar, context)       /* Fault address */
# define DSISR_sig(context)                EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                 EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
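    /* DSISR bit 0x02000000 indicates a store access; trap 0x400 is an
       instruction-fetch (ISI) fault, which is never a write */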
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
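    /* op (bits 31:30) == 3 selects the load/store instruction format;
       the op3 field tested below picks out the store variants */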
    if ((insn >> 30) == 3) {
      switch((insn >> 19) & 0x3f) {
      case 0x05: // stb
      case 0x15: // stba
      case 0x06: // sth
      case 0x16: // stha
      case 0x04: // st
      case 0x14: // sta
      case 0x07: // std
      case 0x17: // stda
      case 0x0e: // stx
      case 0x1e: // stxa
      case 0x24: // stf
      case 0x34: // stfa
      case 0x27: // stdf
      case 0x37: // stdfa
      case 0x26: // stqf
      case 0x36: // stqfa
      case 0x25: // stfsr
      case 0x3c: // casa
      case 0x3e: // casxa
        is_write = 1;
        break;
      }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID        1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
      case SIGILL:
      case SIGFPE:
      case SIGSEGV:
      case SIGBUS:
      case SIGTRAP:
          if (info->si_code && (info->si_segvflags & __ISR_VALID))
              /* ISR.W (write-access) is bit 33:  */
              is_write = (info->si_isr >> 33) & 1;
          break;

      default:
          break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */