/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"
#include "qemu-barrier.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}
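
/* Abandon the current TB and unwind to the setjmp() point in
   cpu_exec().  Callers that want a specific exit reason must set
   env->exception_index before calling this. */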
void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp? */
#ifdef __linux__
#ifdef __ia64
        sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
#else
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#endif
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
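
/* TB lookup is two-level: tb_find_fast() below probes the per-CPU
   virtual-PC cache (env->tb_jmp_cache) and falls back to
   tb_find_slow(), which searches the physically indexed hash table
   and, failing that, translates a new block with tb_gen_code(). */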
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code is available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB to the virtual PC hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

/* main execution loop */

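/* Set from outside the loop (e.g. a signal handler or another thread)
   to make the CPU leave cpu_exec(); it is mirrored into
   env->exit_request, which is checked once per TB below. */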
volatile sig_atomic_t exit_request;

int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
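    /* Return value of tcg_qemu_tb_exec(): the address of the last TB
       that ran, with status in the low two bits -- 0/1 name the
       goto_tb slot that was taken (used below to chain TBs), 2 means
       the instruction counter expired mid-block. */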
    unsigned long next_tb;

    if (env1->halted) {
        if (!cpu_has_work(env1)) {
            return EXCP_HALTED;
        }

        env1->halted = 0;
    }

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it.  */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    barrier();
    env = env1;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

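    /* i386 condition codes are computed lazily: translated code keeps
       the operands of the last flag-setting operation plus a CC_OP tag
       instead of architectural EFLAGS bits, so they are split out here
       on entry and folded back together on exit below. */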
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_LM32)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value;
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
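                        /* icount_decr.u16.low holds at most 0xffff
                           instructions; any larger budget is parked in
                           icount_extra and fed to the decrementer a
                           chunk at a time. */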
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    barrier();
    env = (void *) saved_env_reg;

    /* fail-safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif

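/* Common fault handler called from the host-specific signal handlers
   below.  Returns 1 when the fault was consumed (the page was
   unprotected for self-modifying code, or the soft MMU installed the
   mapping) and 0 when the host OS should handle it.  For a genuine
   guest fault it does not return: guest CPU state is rebuilt via
   cpu_restore_state() and EXCEPTION_ACTION leaves through longjmp. */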
/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}

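/* One cpu_signal_handler() per supported host CPU.  Each variant
   extracts the faulting PC, a best-effort is_write flag, and the
   interrupted signal mask from the host-specific signal context and
   forwards them to handle_cpu_signal() above. */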
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context)  /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <ucontext.h>
# define IAR_sig(context)               ((context)->uc_mcontext.mc_srr0)
# define MSR_sig(context)               ((context)->uc_mcontext.mc_srr1)
# define CTR_sig(context)               ((context)->uc_mcontext.mc_ctr)
# define XER_sig(context)               ((context)->uc_mcontext.mc_xer)
# define LR_sig(context)                ((context)->uc_mcontext.mc_lr)
# define CR_sig(context)                ((context)->uc_mcontext.mc_cr)
/* Exception Registers access */
# define DAR_sig(context)               ((context)->uc_mcontext.mc_dar)
# define DSISR_sig(context)             ((context)->uc_mcontext.mc_dsisr)
# define TRAP_sig(context)              ((context)->uc_mcontext.mc_exc)
#endif /* __FreeBSD__ || __FreeBSD_kernel__ */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context)   /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of the PowerPC exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *(uint32_t *)pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch ((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID        1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             (sigset_t *)&uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    uint16_t *pinsn;
    int is_write = 0;

    pc = uc->uc_mcontext.psw.addr;

    /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
       of the normal 2 arguments.  The 3rd argument contains the "int_code"
       from the hardware which does in fact contain the is_write value.
       The rt signal handler, as far as I can tell, does not give this value
       at all.  Not that we could get to it from here even if it were.  */
    /* ??? This is not even close to complete, since it ignores all
       of the read-modify-write instructions.  */
    pinsn = (uint16_t *)pc;
    switch (pinsn[0] >> 8) {
    case 0x50: /* ST */
    case 0x42: /* STC */
    case 0x40: /* STH */
        is_write = 1;
        break;
    case 0xc4: /* RIL format insns */
        switch (pinsn[0] & 0xf) {
        case 0xf: /* STRL */
        case 0xb: /* STGRL */
        case 0x7: /* STHRL */
            is_write = 1;
        }
        break;
    case 0xe3: /* RXY format insns */
        switch (pinsn[2] & 0xff) {
        case 0x50: /* STY */
        case 0x24: /* STG */
        case 0x72: /* STCY */
        case 0x70: /* STHY */
        case 0x8e: /* STPQ */
        case 0x3f: /* STRVH */
        case 0x3e: /* STRV */
        case 0x2f: /* STRVG */
            is_write = 1;
        }
        break;
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc = uc->uc_mcontext.sc_iaoq[0];
    uint32_t insn = *(uint32_t *)pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster.  */
    switch (insn >> 26) {
    case 0x1a: /* STW */
    case 0x19: /* STH */
    case 0x18: /* STB */
    case 0x1b: /* STWM */
        is_write = 1;
        break;

    case 0x09: /* CSTWX, FSTWX, FSTWS */
    case 0x0b: /* CSTDX, FSTDX, FSTDS */
        /* Distinguish from coprocessor load ... */
        is_write = (insn >> 9) & 1;
        break;

    case 0x03:
        switch ((insn >> 6) & 15) {
        case 0xa: /* STWS */
        case 0x9: /* STHS */
        case 0x8: /* STBS */
        case 0xe: /* STWAS */
        case 0xc: /* STBYS */
            is_write = 1;
        }
        break;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */