Revision 2e70f6ef cpu-exec.c
b/cpu-exec.c | ||
---|---|---|
82 | 82 |
longjmp(env->jmp_env, 1); |
83 | 83 |
} |
84 | 84 |
|
85 |
/* Execute the code without caching the generated code. An interpreter |
|
86 |
could be used if available. */ |
|
87 |
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb) |
|
88 |
{ |
|
89 |
unsigned long next_tb; |
|
90 |
TranslationBlock *tb; |
|
91 |
|
|
92 |
/* Should never happen. |
|
93 |
We only end up here when an existing TB is too long. */ |
|
94 |
if (max_cycles > CF_COUNT_MASK) |
|
95 |
max_cycles = CF_COUNT_MASK; |
|
96 |
|
|
97 |
tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags, |
|
98 |
max_cycles); |
|
99 |
env->current_tb = tb; |
|
100 |
/* execute the generated code */ |
|
101 |
next_tb = tcg_qemu_tb_exec(tb->tc_ptr); |
|
102 |
|
|
103 |
if ((next_tb & 3) == 2) { |
|
104 |
/* Restore PC. This may happen if async event occurs before |
|
105 |
the TB starts executing. */ |
|
106 |
CPU_PC_FROM_TB(env, tb); |
|
107 |
} |
|
108 |
tb_phys_invalidate(tb, -1); |
|
109 |
tb_free(tb); |
|
110 |
} |
|
111 |
|
|
85 | 112 |
static TranslationBlock *tb_find_slow(target_ulong pc, |
86 | 113 |
target_ulong cs_base, |
87 | 114 |
uint64_t flags) |
88 | 115 |
{ |
89 | 116 |
TranslationBlock *tb, **ptb1; |
90 |
int code_gen_size; |
|
91 | 117 |
unsigned int h; |
92 | 118 |
target_ulong phys_pc, phys_page1, phys_page2, virt_page2; |
93 |
uint8_t *tc_ptr; |
|
94 | 119 |
|
95 | 120 |
tb_invalidated_flag = 0; |
96 | 121 |
|
... | ... | |
124 | 149 |
ptb1 = &tb->phys_hash_next; |
125 | 150 |
} |
126 | 151 |
not_found: |
127 |
/* if no translated code available, then translate it now */ |
|
128 |
tb = tb_alloc(pc); |
|
129 |
if (!tb) { |
|
130 |
/* flush must be done */ |
|
131 |
tb_flush(env); |
|
132 |
/* cannot fail at this point */ |
|
133 |
tb = tb_alloc(pc); |
|
134 |
/* don't forget to invalidate previous TB info */ |
|
135 |
tb_invalidated_flag = 1; |
|
136 |
} |
|
137 |
tc_ptr = code_gen_ptr; |
|
138 |
tb->tc_ptr = tc_ptr; |
|
139 |
tb->cs_base = cs_base; |
|
140 |
tb->flags = flags; |
|
141 |
cpu_gen_code(env, tb, &code_gen_size); |
|
142 |
code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1)); |
|
143 |
|
|
144 |
/* check next page if needed */ |
|
145 |
virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK; |
|
146 |
phys_page2 = -1; |
|
147 |
if ((pc & TARGET_PAGE_MASK) != virt_page2) { |
|
148 |
phys_page2 = get_phys_addr_code(env, virt_page2); |
|
149 |
} |
|
150 |
tb_link_phys(tb, phys_pc, phys_page2); |
|
152 |
/* if no translated code available, then translate it now */ |
|
153 |
tb = tb_gen_code(env, pc, cs_base, flags, 0); |
|
151 | 154 |
|
152 | 155 |
found: |
153 | 156 |
/* we add the TB in the virtual pc hash table */ |
... | ... | |
583 | 586 |
of memory exceptions while generating the code, we |
584 | 587 |
must recompute the hash index here */ |
585 | 588 |
next_tb = 0; |
589 |
tb_invalidated_flag = 0; |
|
586 | 590 |
} |
587 | 591 |
#ifdef DEBUG_EXEC |
588 | 592 |
if ((loglevel & CPU_LOG_EXEC)) { |
... | ... | |
604 | 608 |
} |
605 | 609 |
} |
606 | 610 |
spin_unlock(&tb_lock); |
607 |
tc_ptr = tb->tc_ptr; |
|
608 | 611 |
env->current_tb = tb; |
612 |
while (env->current_tb) { |
|
613 |
tc_ptr = tb->tc_ptr; |
|
609 | 614 |
/* execute the generated code */ |
610 | 615 |
#if defined(__sparc__) && !defined(HOST_SOLARIS) |
611 | 616 |
#undef env |
612 |
env = cpu_single_env; |
|
617 |
env = cpu_single_env;
|
|
613 | 618 |
#define env cpu_single_env |
614 | 619 |
#endif |
615 |
next_tb = tcg_qemu_tb_exec(tc_ptr); |
|
616 |
env->current_tb = NULL; |
|
620 |
next_tb = tcg_qemu_tb_exec(tc_ptr); |
|
621 |
env->current_tb = NULL; |
|
622 |
if ((next_tb & 3) == 2) { |
|
623 |
/* Instruction counter expired. */ |
|
624 |
int insns_left; |
|
625 |
tb = (TranslationBlock *)(long)(next_tb & ~3); |
|
626 |
/* Restore PC. */ |
|
627 |
CPU_PC_FROM_TB(env, tb); |
|
628 |
insns_left = env->icount_decr.u32; |
|
629 |
if (env->icount_extra && insns_left >= 0) { |
|
630 |
/* Refill decrementer and continue execution. */ |
|
631 |
env->icount_extra += insns_left; |
|
632 |
if (env->icount_extra > 0xffff) { |
|
633 |
insns_left = 0xffff; |
|
634 |
} else { |
|
635 |
insns_left = env->icount_extra; |
|
636 |
} |
|
637 |
env->icount_extra -= insns_left; |
|
638 |
env->icount_decr.u16.low = insns_left; |
|
639 |
} else { |
|
640 |
if (insns_left > 0) { |
|
641 |
/* Execute remaining instructions. */ |
|
642 |
cpu_exec_nocache(insns_left, tb); |
|
643 |
} |
|
644 |
env->exception_index = EXCP_INTERRUPT; |
|
645 |
next_tb = 0; |
|
646 |
cpu_loop_exit(); |
|
647 |
} |
|
648 |
} |
|
649 |
} |
|
617 | 650 |
/* reset soft MMU for next block (it can currently |
618 | 651 |
only be set by a memory fault) */ |
619 | 652 |
#if defined(USE_KQEMU) |
Also available in: Unified diff