/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}
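
/* Note: cpu_loop_exit() unwinds to the setjmp() in cpu_exec() via
   env->jmp_env; current_tb is cleared first so that no stale translated
   block is recorded as executing once the jump lands.  */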

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator.
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
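
/* Note: the throwaway TB generated by cpu_exec_nocache() is capped at
   max_cycles guest instructions and is invalidated and freed right
   after its single execution instead of entering the normal caches.  */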

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
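
/* Note: TBs are hashed on the physical PC so that different virtual
   mappings of the same code share one translation; for a block that
   spans two pages, the second physical page is re-checked before the
   lookup is declared a hit.  */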

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
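
/* Note: tb_jmp_cache is a direct-mapped cache indexed by the virtual
   PC, sitting in front of the physical hash table; pc, cs_base and
   flags must all match, since they define the state the TB was
   translated for.  */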

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        QTAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

int cpu_exec(CPUState *env1)
{
    host_reg_t saved_env_reg;
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it.  */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    asm("");
    env = env1;

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
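
    /* Note: on i386 the condition codes are kept lazily while translated
       code runs: CC_SRC and CC_OP describe the last flag-setting
       operation, and helper_cc_compute_all() folds them back into the
       architectural eflags format when cpu_exec() exits below.  */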
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                    env->exception_index = -1;
#endif
                }
            }

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
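
            /* next_tb carries the last executed TB, with two status bits
               in its low bits; it is forced to 0 whenever control flow
               changed (interrupt, exception, new translation) so that the
               first block executed afterwards is not chained to a stale
               predecessor.  */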

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                            svm_check_intercept(SVM_EXIT_INIT);
                            do_cpu_init(env);
                            env->exception_index = EXCP_HALTED;
                            cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                            do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef CONFIG_DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
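                /* In the tb_add_jump() call above, next_tb & ~3 is the TB
                   that just ran and next_tb & 3 is the index of the jump
                   slot it left through; patching that slot chains the two
                   blocks so later passes skip this lookup loop entirely.  */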
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (!unlikely (env->exit_request)) {
                    env->current_tb = tb;
                    tc_ptr = tb->tc_ptr;
                /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    asm("");
    env = (void *) saved_env_reg;

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}
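
/* The host-specific handlers below differ only in how they extract the
   faulting PC, the write flag and the saved signal mask from the host
   signal context before delegating to handle_cpu_signal().  */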

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif
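
/* Note: trap number 0xe is the x86 page fault; bit 1 of the page-fault
   error code is the W bit, which is how the handlers below derive
   is_write from the host context.  */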

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)       (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context)  /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context)      /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID        1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
      case SIGILL:
      case SIGFPE:
      case SIGSEGV:
      case SIGBUS:
      case SIGTRAP:
          if (info->si_code && (info->si_segvflags & __ISR_VALID))
              /* ISR.W (write-access) is bit 33:  */
              is_write = (info->si_isr >> 33) & 1;
          break;

      default:
          break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */