/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"
#include "qemu-barrier.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}
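
/* A sketch of the pairing (see cpu_exec() below): the main loop arms
   env->jmp_env with setjmp(), and cpu_loop_exit() unwinds back to it
   from anywhere inside a helper or the generated code:

       if (setjmp(env->jmp_env) == 0) {
           ... run translated code, which may call cpu_loop_exit() ...
       }
       // control resumes here, with setjmp() returning 1
*/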

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
#ifdef __ia64
        sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
#else
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#endif
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
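
/* A note on the next_tb encoding (an assumption drawn from how it is
   used here and in cpu_exec() below): TranslationBlock pointers are at
   least 4-byte aligned, so tcg_qemu_tb_exec() can pack a small tag into
   the two low bits of the returned pointer.  Tags 0 and 1 name the jump
   slot of the TB that exited (fed back to tb_add_jump() for chaining);
   tag 2 marks an exit where the PC must be recovered from the TB, e.g.
   an instruction-count expiry:

       tb = (TranslationBlock *)(next_tb & ~3);   // strip the tag
       if ((next_tb & 3) == 2) { ... restore PC from tb ... }
*/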

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
   /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
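
/* Worked example of the two-page check above (assuming 4 KiB target
   pages): a TB that starts at pc = 0x401ff8 and is 16 guest bytes long
   spills into the next virtual page.  At generation time it records
       page_addr[0] = phys(0x401000)      // page of the first byte
       page_addr[1] = phys(0x402000)      // page of the overflow bytes
   and the lookup only accepts it after re-translating
       virt_page2 = (0x401ff8 & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE
                  = 0x402000
   to a physical address that matches page_addr[1], so remapping either
   page safely invalidates the match.  */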

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
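
/* tb_jmp_cache is in effect a direct-mapped, virtually-indexed cache:
   one TranslationBlock pointer per bucket, no chaining.  A miss (empty
   slot, or a colliding entry whose pc/cs_base/flags disagree) simply
   falls through to the physically-indexed tb_find_slow() path, which
   refills the slot.  A minimal model of the lookup:

       idx = tb_jmp_cache_hash_func(pc);   // fold pc into a table index
       tb  = env->tb_jmp_cache[idx];       // one probe, no loop
*/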

/* main execution loop */

volatile sig_atomic_t exit_request;

int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it.  */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    barrier();
    env = env1;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
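    /* Worked example of the DF line above: EFLAGS bit 10 is the
       direction flag, stored architecturally as 0 or 1 but kept here
       as a stride of +1/-1 while translated code runs:
           bit 10 = 0  ->  DF = 1 - 2*0 = +1  (string ops increment)
           bit 10 = 1  ->  DF = 1 - 2*1 = -1  (string ops decrement)
       so REP MOVS and friends can scale addresses by DF directly.  */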
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_LM32)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                            svm_check_intercept(SVM_EXIT_INIT);
                            do_cpu_init(env);
                            env->exception_index = EXCP_HALTED;
                            cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                            do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
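                    /* Recap of the CPU_INTERRUPT_HARD condition above
                       (as read from the code): with GIF set, an external
                       interrupt is taken when either
                         - HF2_VINTR_MASK and HF2_HIF_MASK are both set
                           (SVM guest with V_INTR_MASKING: the host IF
                           lives in HIF), or
                         - HF2_VINTR_MASK is clear, EFLAGS.IF is set, and
                           there is no interrupt shadow (HF_INHIBIT_IRQ_MASK
                           clear, e.g. the instruction after STI/MOV SS).  */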
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                   /* Don't use the cached interrupt_request value,
                      do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
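                    /* Numeric example of the refill above (values chosen
                       for illustration): with insns_left = 0 and
                       icount_extra = 70000 budgeted instructions, the
                       16-bit decrementer can only hold 0xffff, so
                           icount_extra += 0      -> 70000
                           insns_left    = 0xffff -> 65535
                           icount_extra -= 65535  -> 4465
                           icount_decr.u16.low = 65535
                       and the remaining 4465 instructions are handed
                       out on the next expiry.  */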
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    barrier();
    env = (void *) saved_env_reg;

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}
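
/* Example of the real-mode/VM86 path above: segment bases are simply
   selector << 4, so loading DS with selector 0x1234 yields
       base  = 0x1234 << 4 = 0x12340
       limit = 0xffff, flags = 0
   while the protected-mode path goes through helper_load_seg() and
   the descriptor tables.  */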

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}
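
/* Decision tree of handle_cpu_signal(), summarised from the code above:
     1 = fault consumed here (after un-write-protecting a page that was
         only protected to guard translated code, or after a soft-MMU
         fill), so the faulting host instruction is simply restarted;
     0 = not a guest MMU fault at all, the caller passes the signal on;
     otherwise the guest really faulted and we never return: guest state
     is rolled back with cpu_restore_state() and EXCEPTION_ACTION
     longjmps back into cpu_exec().  */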

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
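
/* Example of the is_write derivation above: trap 0xe is the x86 page
   fault, whose error code carries W/R in bit 1.  A user-mode write to
   a non-present page pushes error code 0x6 (0b110), so
       (0x6 >> 1) & 1 = 1  ->  is_write = 1
   whereas a read fault (e.g. error code 0x4) yields 0.  Any other trap
   number is treated as a read.  */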

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context)  /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)   /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <ucontext.h>
# define IAR_sig(context)               ((context)->uc_mcontext.mc_srr0)
# define MSR_sig(context)               ((context)->uc_mcontext.mc_srr1)
# define CTR_sig(context)               ((context)->uc_mcontext.mc_ctr)
# define XER_sig(context)               ((context)->uc_mcontext.mc_xer)
# define LR_sig(context)                ((context)->uc_mcontext.mc_lr)
# define CR_sig(context)                ((context)->uc_mcontext.mc_cr)
/* Exception Registers access */
# define DAR_sig(context)               ((context)->uc_mcontext.mc_dar)
# define DSISR_sig(context)             ((context)->uc_mcontext.mc_dsisr)
# define TRAP_sig(context)              ((context)->uc_mcontext.mc_exc)
#endif /* __FreeBSD__|| __FreeBSD_kernel__ */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                           void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }
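
    /* Example of the decode above: Alpha keeps the primary opcode in
       bits 31..26, so for a hypothetical stl instruction encoded as
       insn = 0xb25e0010,
           insn >> 26 = 0x2c   ->  a store, is_write = 1
       Loads fall through the switch and leave is_write = 0.  */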

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
      switch((insn >> 19) & 0x3f) {
      case 0x05: // stb
      case 0x15: // stba
      case 0x06: // sth
      case 0x16: // stha
      case 0x04: // st
      case 0x14: // sta
      case 0x07: // std
      case 0x17: // stda
      case 0x0e: // stx
      case 0x1e: // stxa
      case 0x24: // stf
      case 0x34: // stfa
      case 0x27: // stdf
      case 0x37: // stdfa
      case 0x26: // stqf
      case 0x36: // stqfa
      case 0x25: // stfsr
      case 0x3c: // casa
      case 0x3e: // casxa
        is_write = 1;
        break;
      }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID        1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
      case SIGILL:
      case SIGFPE:
      case SIGSEGV:
      case SIGBUS:
      case SIGTRAP:
          if (info->si_code && (info->si_segvflags & __ISR_VALID))
              /* ISR.W (write-access) is bit 33:  */
              is_write = (info->si_isr >> 33) & 1;
          break;

      default:
          break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             (sigset_t *)&uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    uint16_t *pinsn;
    int is_write = 0;

    pc = uc->uc_mcontext.psw.addr;

    /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
       of the normal 2 arguments.  The 3rd argument contains the "int_code"
       from the hardware which does in fact contain the is_write value.
       The rt signal handler, as far as I can tell, does not give this value
       at all.  Not that we could get to it from here even if it were.  */
    /* ??? This is not even close to complete, since it ignores all
       of the read-modify-write instructions.  */
    pinsn = (uint16_t *)pc;
    switch (pinsn[0] >> 8) {
    case 0x50: /* ST */
    case 0x42: /* STC */
    case 0x40: /* STH */
        is_write = 1;
        break;
    case 0xc4: /* RIL format insns */
        switch (pinsn[0] & 0xf) {
        case 0xf: /* STRL */
        case 0xb: /* STGRL */
        case 0x7: /* STHRL */
            is_write = 1;
        }
        break;
    case 0xe3: /* RXY format insns */
        switch (pinsn[2] & 0xff) {
        case 0x50: /* STY */
        case 0x24: /* STG */
        case 0x72: /* STCY */
        case 0x70: /* STHY */
        case 0x8e: /* STPQ */
        case 0x3f: /* STRVH */
        case 0x3e: /* STRV */
        case 0x2f: /* STRVG */
            is_write = 1;
        }
        break;
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc = uc->uc_mcontext.sc_iaoq[0];
    uint32_t insn = *(uint32_t *)pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster.  */
    switch (insn >> 26) {
    case 0x1a: /* STW */
    case 0x19: /* STH */
    case 0x18: /* STB */
    case 0x1b: /* STWM */
        is_write = 1;
        break;

    case 0x09: /* CSTWX, FSTWX, FSTWS */
    case 0x0b: /* CSTDX, FSTDX, FSTDS */
        /* Distinguish from coprocessor load ... */
        is_write = (insn >> 9) & 1;
        break;

    case 0x03:
        switch ((insn >> 6) & 15) {
        case 0xa: /* STWS */
        case 0x9: /* STHS */
        case 0x8: /* STBS */
        case 0xe: /* STWAS */
        case 0xc: /* STBYS */
            is_write = 1;
        }
        break;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */