/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"
#include "qemu-barrier.h"

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
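
/* Set when the translation cache has been flushed while generating code
   (see tb_gen_code() in exec.c).  A flush can free a TB that the main loop
   still holds in its cached next_tb, so cpu_exec() drops any pending TB
   chaining whenever it finds this flag set.  */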
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
    env = env1;

    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif
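
/* Return-value convention of tcg_qemu_tb_exec(), as relied on below: the
   low two bits of next_tb carry an exit status and the remaining bits the
   address of the TB we exited from.  Values 0/1 name the jump slot to patch
   when chaining (see the tb_add_jump() call in cpu_exec()); the value 2
   means the TB exited early, e.g. because the instruction counter expired,
   so the PC must be restored from the TB.  This reading follows the uses of
   "next_tb & 3" in this file.  */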

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code is available, translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* add the TB to the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
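
/* TB lookup is two-level: tb_find_fast() first probes tb_jmp_cache, a
   direct-mapped cache indexed by a hash of the virtual PC; on a miss,
   tb_find_slow() above walks the physical-address hash chain (translating
   afresh if nothing matches) and refills tb_jmp_cache.  Keying the slow
   path on physical addresses lets a block be shared between different
   virtual mappings of the same physical code.  */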
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
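
/* Called when the main loop is about to return EXCP_DEBUG.  If no
   watchpoint fired, the exit came from a breakpoint or single-step, so any
   stale BP_WATCHPOINT_HIT flags are cleared before the registered handler
   (if any) runs.  */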
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */
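
/* Set externally (e.g. from a signal handler, hence sig_atomic_t) to force
   an entering CPU out of the execution loop: cpu_exec() samples it on entry
   and turns it into the per-CPU env->exit_request, which makes the inner
   loop leave with EXCP_INTERRUPT.  */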
volatile sig_atomic_t exit_request;

int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (env1->halted) {
        if (!cpu_has_work(env1)) {
            return EXCP_HALTED;
        }

        env1->halted = 0;
    }

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it.  */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    barrier();
    env = env1;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
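
    /* Loop structure: the outer for(;;) re-arms setjmp() after every
       longjmp() from cpu_loop_exit() or cpu_resume_from_signal(); the
       setjmp==0 branch first delivers any pending exception, then the
       inner for(;;) services interrupts and executes chained TBs until
       something forces an exit.  */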
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* in user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_LM32)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_UNICORE32)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_S390X)
                    do_interrupt(env);
#endif
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                            svm_check_intercept(SVM_EXIT_INIT);
                            do_cpu_init(env);
                            env->exception_index = EXCP_HALTED;
                            cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                            do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value:
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
605
                   spans two pages, we cannot safely do a direct
606
                   jump. */
607
                if (next_tb != 0 && tb->page_addr[1] == -1) {
608
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
609
                }
610
                spin_unlock(&tb_lock);
611

    
612
                /* cpu_interrupt might be called while translating the
613
                   TB, but before it is linked into a potentially
614
                   infinite loop and becomes env->current_tb. Avoid
615
                   starting execution if there is a pending interrupt. */
616
                env->current_tb = tb;
617
                barrier();
618
                if (likely(!env->exit_request)) {
619
                    tc_ptr = tb->tc_ptr;
620
                /* execute the generated code */
621
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
622
#undef env
623
                    env = cpu_single_env;
624
#define env cpu_single_env
625
#endif
626
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
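                    /* icount bookkeeping: translated code decrements the
                       16-bit counter env->icount_decr.u16.low and exits
                       with status 2 when it runs out.  Budgets larger than
                       0xffff are parked in env->icount_extra and fed to the
                       decrementer in 16-bit slices below.  */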
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    barrier();
    env = (void *) saved_env_reg;

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}