/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas.h"
#include "tcg.h"
#include "qemu-barrier.h"
#include "qtest.h"

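/* Cleared before each lookup in tb_find_slow() and set when generating a
   new TB forces the translation cache to be flushed; the main loop tests
   it so that a stale next_tb is never chained to a new block. */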
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC

bool qemu_cpu_has_work(CPUArchState *env)
{
    return cpu_has_work(env);
}

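/* Abandon the current TB and unwind straight back to the setjmp() in
   cpu_exec(); the 1 passed to longjmp() is the value that setjmp() returns
   after the jump. */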
void cpu_loop_exit(CPUArchState *env)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator.
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif

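/* tcg_qemu_tb_exec() returns the address of the last executed TB with its
   low two bits used as status: values 0/1 select which outgoing jump slot
   of that TB may be patched, while 2 means execution stopped before the
   block body ran (used by the icount machinery; see cpu_exec() below). */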
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    tcg_target_ulong next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

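/* Slow path: blocks are kept in a hash table keyed by physical PC, so a
   cached translation is only reused when pc, cs_base, flags and the
   underlying physical page(s) all match.  A TB that crosses a page
   boundary records its second physical page and must match on both. */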
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code is available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB to the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

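/* Fast path: tb_jmp_cache is a direct-mapped cache indexed by a hash of
   the virtual PC.  A hit is only trusted after re-checking pc, cs_base and
   flags, since unrelated states can map to the same slot. */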
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

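/* Called when cpu_exec() is about to return EXCP_DEBUG.  If no watchpoint
   actually fired (e.g. a single-step or breakpoint stop), clear any stale
   BP_WATCHPOINT_HIT flags before invoking the registered handler. */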
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

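/* Set asynchronously (from a signal handler or another thread) to make the
   CPU drop out of its execution loop; cpu_exec() samples it once on entry
   and mirrors it into env->exit_request. */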
volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
#ifdef TARGET_PPC
    CPUState *cpu = ENV_GET_CPU(env);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tcg_target_ulong next_tb;

    if (env->halted) {
        if (!cpu_has_work(env)) {
            return EXCP_HALTED;
        }

        env->halted = 0;
    }

    cpu_single_env = env;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
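    /* Everything below - generated code, helpers, the interrupt dispatch -
       bails out through cpu_loop_exit(), whose longjmp() returns to this
       setjmp() with env->exception_index describing the cause. */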
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* in user-mode emulation, we raise a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
                    do_interrupt(env);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
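                /* The per-target blocks below service pending interrupts;
                   whenever one changes the control flow it sets next_tb = 0
                   so the TB that just ran is not chained directly to the
                   next one. */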
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(env, SVM_EXIT_INIT);
                        do_cpu_init(x86_env_get_cpu(env));
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(env, SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
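                            /* IRQ delivery gate: with SVM virtual interrupt
                               masking (V_INTR_MASKING) physical interrupts
                               follow the host's IF copy (HF2_HIF); otherwise
                               they require the guest's EFLAGS.IF and no
                               interrupt shadow (HF_INHIBIT_IRQ). */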
                            int intno;
                            svm_check_intercept(env, SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(env, SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
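                /* tb_lock serialises TB lookup, code generation and the
                   direct-jump patching done by tb_add_jump() below. */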
                spin_lock(&tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
                              tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
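                /* barrier() is a compiler barrier: it keeps the store to
                   env->current_tb from being reordered after the
                   exit_request test below. */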
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = tcg_qemu_tb_exec(env, tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
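                            /* The decrementer is only 16 bits wide, so any
                               budget above 0xffff instructions is parked in
                               icount_extra and moved over in chunks. */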
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}