/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"
#include "qemu-barrier.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}
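
/* Note: cpu_loop_exit() unwinds via longjmp to the setjmp(env->jmp_env)
   at the top of the main loop in cpu_exec(), which then re-dispatches on
   env->exception_index.  */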

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
#ifdef __ia64
        sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
#else
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#endif
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
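
/* Note on the value returned by tcg_qemu_tb_exec(): TranslationBlock
   pointers are aligned, so the low two bits are free to carry an exit
   code.  Values 0 and 1 name the goto_tb jump slot through which the
   block exited (used for direct-jump patching in cpu_exec() below),
   while 2 appears to be reserved for the icount path: the block stopped
   because the instruction budget ran out, or an async event fired
   before it started.  */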

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
   /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
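
/* TB lookup is two-level: tb_find_fast() probes tb_jmp_cache, a
   direct-mapped cache indexed by a hash of the virtual PC, and falls
   back to tb_find_slow(), which walks the physical-address hash chain
   and translates on a complete miss.  Keying the slow path by physical
   address is what lets cached TBs stay valid across changes to the
   virtual address mapping.  */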

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

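/* Set asynchronously from outside the execution loop (given the
   sig_atomic_t type, presumably by a signal handler or another thread)
   to make cpu_exec() wind down: it is mirrored into env->exit_request
   on entry and checked before each block is run.  */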
volatile sig_atomic_t exit_request;

int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (env1->halted) {
        if (!cpu_has_work(env1)) {
            return EXCP_HALTED;
        }

        env1->halted = 0;
    }

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it.  */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    barrier();
    env = env1;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_LM32)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_UNICORE32)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_S390X)
                    do_interrupt(env);
#endif
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
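            /* next_tb caches the previously executed TB (with the
               jump-slot index in its low bits) so that, once the next TB
               is found, a direct host jump can be patched between the two
               via tb_add_jump().  Setting it to 0 disables chaining.  */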
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                            svm_check_intercept(SVM_EXIT_INIT);
                            do_cpu_init(env);
                            env->exception_index = EXCP_HALTED;
                            cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                            do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                   /* Don't use the cached interrupt_request value,
                      do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
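                        /* icount bookkeeping: the generated code decrements
                           the 16-bit env->icount_decr.u16.low field; any
                           part of the instruction budget that does not fit
                           in 16 bits is parked in env->icount_extra and is
                           used to refill the decrementer here.  */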
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    barrier();
    env = (void *) saved_env_reg;

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}
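
/* A return value of 1 from handle_cpu_signal() tells the per-host
   cpu_signal_handler() below that the fault was consumed by the emulator
   (page unprotected, soft-MMU fault serviced, or a guest exception
   raised); 0 means the fault is unrelated to the guest and should be
   delivered to the application normally.  */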

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)                ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)                REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                        REG_sig(nip, context)        /* Program counter */
# define MSR_sig(context)                        REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)                        REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                        REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context)                        REG_sig(link, context) /* Link register */
# define CR_sig(context)                        REG_sig(ccr, context) /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)                (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)                        (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                        REG_sig(dar, context)
# define DSISR_sig(context)                        REG_sig(dsisr, context)
# define TRAP_sig(context)                        REG_sig(trap, context)
#endif /* linux */

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <ucontext.h>
# define IAR_sig(context)                ((context)->uc_mcontext.mc_srr0)
# define MSR_sig(context)                ((context)->uc_mcontext.mc_srr1)
# define CTR_sig(context)                ((context)->uc_mcontext.mc_ctr)
# define XER_sig(context)                ((context)->uc_mcontext.mc_xer)
# define LR_sig(context)                ((context)->uc_mcontext.mc_lr)
# define CR_sig(context)                ((context)->uc_mcontext.mc_cr)
/* Exception Registers access */
# define DAR_sig(context)                ((context)->uc_mcontext.mc_dar)
# define DSISR_sig(context)                ((context)->uc_mcontext.mc_dsisr)
# define TRAP_sig(context)                ((context)->uc_mcontext.mc_exc)
#endif /* __FreeBSD__|| __FreeBSD_kernel__ */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)                ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)        ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)        ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)                ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)                REG_sig(r##reg_num, context)
# define IAR_sig(context)                        REG_sig(srr0, context)        /* Program counter */
# define MSR_sig(context)                        REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                        REG_sig(ctr, context)
# define XER_sig(context)                        REG_sig(xer, context) /* Fixed-point exception register */
# define LR_sig(context)                        REG_sig(lr, context)  /* Link register */
# define CR_sig(context)                        REG_sig(cr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)                FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                        ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                        EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)                        EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                        EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                           void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
      switch((insn >> 19) & 0x3f) {
      case 0x05: // stb
      case 0x15: // stba
      case 0x06: // sth
      case 0x16: // stha
      case 0x04: // st
      case 0x14: // sta
      case 0x07: // std
      case 0x17: // stda
      case 0x0e: // stx
      case 0x1e: // stxa
      case 0x24: // stf
      case 0x34: // stfa
      case 0x27: // stdf
      case 0x37: // stdfa
      case 0x26: // stqf
      case 0x36: // stqfa
      case 0x25: // stfsr
      case 0x3c: // casa
      case 0x3e: // casxa
        is_write = 1;
        break;
      }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID        1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
      case SIGILL:
      case SIGFPE:
      case SIGSEGV:
      case SIGBUS:
      case SIGTRAP:
          if (info->si_code && (info->si_segvflags & __ISR_VALID))
              /* ISR.W (write-access) is bit 33:  */
              is_write = (info->si_isr >> 33) & 1;
          break;

      default:
          break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             (sigset_t *)&uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    uint16_t *pinsn;
    int is_write = 0;

    pc = uc->uc_mcontext.psw.addr;

    /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
       of the normal 2 arguments.  The 3rd argument contains the "int_code"
       from the hardware which does in fact contain the is_write value.
       The rt signal handler, as far as I can tell, does not give this value
       at all.  Not that we could get to it from here even if it were.  */
    /* ??? This is not even close to complete, since it ignores all
       of the read-modify-write instructions.  */
    pinsn = (uint16_t *)pc;
    switch (pinsn[0] >> 8) {
    case 0x50: /* ST */
    case 0x42: /* STC */
    case 0x40: /* STH */
        is_write = 1;
        break;
    case 0xc4: /* RIL format insns */
        switch (pinsn[0] & 0xf) {
        case 0xf: /* STRL */
        case 0xb: /* STGRL */
        case 0x7: /* STHRL */
            is_write = 1;
        }
        break;
    case 0xe3: /* RXY format insns */
        switch (pinsn[2] & 0xff) {
        case 0x50: /* STY */
        case 0x24: /* STG */
        case 0x72: /* STCY */
        case 0x70: /* STHY */
        case 0x8e: /* STPQ */
        case 0x3f: /* STRVH */
        case 0x3e: /* STRV */
        case 0x2f: /* STRVG */
            is_write = 1;
        }
        break;
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc = uc->uc_mcontext.sc_iaoq[0];
    uint32_t insn = *(uint32_t *)pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster.  */
    switch (insn >> 26) {
    case 0x1a: /* STW */
    case 0x19: /* STH */
    case 0x18: /* STB */
    case 0x1b: /* STWM */
        is_write = 1;
        break;

    case 0x09: /* CSTWX, FSTWX, FSTWS */
    case 0x0b: /* CSTDX, FSTDX, FSTDS */
        /* Distinguish from coprocessor load ... */
        is_write = (insn >> 9) & 1;
        break;

    case 0x03:
        switch ((insn >> 6) & 15) {
        case 0xa: /* STWS */
        case 0x9: /* STHS */
        case 0x8: /* STBS */
        case 0xe: /* STWAS */
        case 0xc: /* STBYS */
            is_write = 1;
        }
        break;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */