root / exec-all.h @ a0d700e4
/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _EXEC_ALL_H_
#define _EXEC_ALL_H_

#include "qemu-common.h"

/* allow seeing translation results - the slowdown should be negligible, so we leave it enabled */
#define DEBUG_DISAS

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */

typedef struct TranslationBlock TranslationBlock;

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 96
/* A Call op needs up to 6 + 2N parameters (N = number of arguments).  */
#define MAX_OPC_PARAM 10
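/* Worked example (added for clarity, not in the original header): with
   MAX_OPC_PARAM = 10 and a call op needing 6 + 2N parameters, a single call
   stays within the per-op limit only for N <= 2 arguments (6 + 2*2 = 10). */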
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

/* Maximum size a TCG op can expand to.  This is complicated because a
   single op may require several host instructions and register reloads.
   For now take a wild guess at 128 bytes, which should allow at least
   a couple of fixup instructions per argument.  */
#define TCG_MAX_OP_SIZE 128

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)

extern target_ulong gen_opc_pc[OPC_BUF_SIZE];
extern target_ulong gen_opc_npc[OPC_BUF_SIZE];
extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
extern uint16_t gen_opc_icount[OPC_BUF_SIZE];
extern target_ulong gen_opc_jump_pc[2];
extern uint32_t gen_opc_hflags[OPC_BUF_SIZE];

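/* Note (added for clarity, not in the original header): these per-opcode side
   tables are filled during translation and are what gen_pc_load() and
   cpu_restore_state() below walk in order to map a host PC inside a TB back
   to the guest PC and related per-target state (cc_op, hflags, ...). */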
#include "qemu-log.h"

void gen_intermediate_code(CPUState *env, struct TranslationBlock *tb);
void gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb);
void gen_pc_load(CPUState *env, struct TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc);

unsigned long code_gen_max_block_size(void);
void cpu_gen_init(void);
int cpu_gen_code(CPUState *env, struct TranslationBlock *tb,
                 int *gen_code_size_ptr);
int cpu_restore_state(struct TranslationBlock *tb,
                      CPUState *env, unsigned long searched_pc,
                      void *puc);
int cpu_restore_state_copy(struct TranslationBlock *tb,
                           CPUState *env, unsigned long searched_pc,
                           void *puc);
void cpu_resume_from_signal(CPUState *env1, void *puc);
void cpu_io_recompile(CPUState *env, void *retaddr);
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base, int flags,
                              int cflags);
void cpu_exec_init(CPUState *env);
void QEMU_NORETURN cpu_loop_exit(void);
int page_unprotect(target_ulong address, unsigned long pc, void *puc);
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access);
void tb_invalidate_page_range(target_ulong start, target_ulong end);
void tlb_flush_page(CPUState *env, target_ulong addr);
void tlb_flush(CPUState *env, int flush_global);
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu);
static inline int tlb_set_page(CPUState *env1, target_ulong vaddr,
                               target_phys_addr_t paddr, int prot,
                               int mmu_idx, int is_softmmu)
{
    if (prot & PAGE_READ)
        prot |= PAGE_EXEC;
    return tlb_set_page_exec(env1, vaddr, paddr, prot, mmu_idx, is_softmmu);
}

#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

#define CODE_GEN_PHYS_HASH_BITS     15
#define CODE_GEN_PHYS_HASH_SIZE     (1 << CODE_GEN_PHYS_HASH_BITS)

#define MIN_CODE_GEN_BUFFER_SIZE     (1024 * 1024)

/* estimated block size for TB allocation */
/* XXX: use a per code average code fragment size and modulate it
   according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#if defined(_ARCH_PPC) || defined(__x86_64__) || defined(__arm__) || defined(__i386__)
#define USE_DIRECT_JUMP
#endif

struct TranslationBlock {
    target_ulong pc;      /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint64_t flags;       /* flags defining in which context the code was generated */
    uint16_t size;        /* size of target code for this block (1 <=
                             size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;      /* compile flags */
#define CF_COUNT_MASK  0x7fff
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */

    uint8_t *tc_ptr;      /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    target_ulong page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[4];  /* offset of jump instruction */
#else
    unsigned long tb_next[2];   /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
    uint32_t icount;
};

static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}

static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
            | (tmp & TB_JMP_ADDR_MASK));
}
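/* Note (added for clarity, not in the original header): the jump-cache index
   is built from two fields of the xor-folded PC: the low TB_JMP_ADDR_MASK bits
   select an entry within a page group and the TB_JMP_PAGE_MASK bits select the
   page group. TBs from the same guest page therefore cluster together, and
   tb_jmp_cache_hash_page() names the first slot of such a cluster so a whole
   page's entries can be cleared in one sweep. */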

static inline unsigned int tb_phys_hash_func(unsigned long pc)
{
    return pc & (CODE_GEN_PHYS_HASH_SIZE - 1);
}

TranslationBlock *tb_alloc(target_ulong pc);
void tb_free(TranslationBlock *tb);
void tb_flush(CPUState *env);
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2);
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr);

extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
extern uint8_t *code_gen_ptr;
extern int code_gen_max_blocks;

#if defined(USE_DIRECT_JUMP)

#if defined(_ARCH_PPC)
extern void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
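/* Note (added for clarity, not in the original header): on i386/x86_64 the
   generated jump is a jmp rel32 and jmp_addr points at its 4-byte immediate.
   The stored value is the displacement from the end of that immediate
   (jmp_addr + 4, i.e. the address of the next instruction) to the new target,
   which is exactly what the rel32 encoding expects. */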
#elif defined(__arm__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
#if QEMU_GNUC_PREREQ(4, 1)
    void __clear_cache(char *beg, char *end);
#else
    register unsigned long _beg __asm ("a1");
    register unsigned long _end __asm ("a2");
    register unsigned long _flg __asm ("a3");
#endif

    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
    *(uint32_t *)jmp_addr =
        (*(uint32_t *)jmp_addr & ~0xffffff)
        | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);

#if QEMU_GNUC_PREREQ(4, 1)
    __clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
#else
    /* flush icache */
    _beg = jmp_addr;
    _end = jmp_addr + 4;
    _flg = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
#endif
}
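/* Note (added for clarity, not in the original header): the ARM version
   rewrites the 24-bit immediate of a B instruction in place; the branch
   offset is expressed in words relative to the instruction address plus 8
   (the pipeline PC), hence (addr - (jmp_addr + 8)) >> 2. Unlike x86, the
   patched word must then be flushed from the instruction cache, either via
   __clear_cache() or the Linux cacheflush swi 0x9f0002. */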
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    unsigned long offset;

    offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
    offset = tb->tb_jmp_offset[n + 2];
    if (offset != 0xffff)
        tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
}
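/* Note (added for clarity, not in the original header): each of the two
   outgoing jump slots (n = 0 or 1) may have a second patch location recorded
   in tb_jmp_offset[n + 2]; the value 0xffff marks that second slot as unused,
   in which case only the first jump instruction is patched. */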

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    tb->tb_next[n] = addr;
}

#endif

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (unsigned long)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((long)(tb) | (n));
    }
}
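/* Worked example (added for clarity, not in the original header): if TB A's
   jump slot 1 is chained to TB B, tb_add_jump() stores B's old jmp_first in
   A->jmp_next[1] and sets B->jmp_first = (TranslationBlock *)((long)A | 1).
   When B is later invalidated, the list walker masks off the low two bits to
   recover A and uses the tag (here 1) to know that A->jmp_next[1] is the link
   to follow and the jump that must be reset; a tag of 2 marks the jmp_first
   sentinel that closes the circular list. */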

TranslationBlock *tb_find_pc(unsigned long pc_ptr);

extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];

#include "qemu-lock.h"

extern spinlock_t tb_lock;

extern int tb_invalidated_flag;

#if !defined(CONFIG_USER_ONLY)

void tlb_fill(target_ulong addr, int is_write, int mmu_idx,
              void *retaddr);

#include "softmmu_defs.h"

#define ACCESS_TYPE (NB_MMU_MODES + 1)
#define MEMSUFFIX _code
#define env cpu_single_env

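/* Note (added for clarity, not in the original header): each inclusion of
   softmmu_header.h below is parameterized by DATA_SIZE and the macros above,
   so the four passes generate the code-fetch accessors (ldub_code, lduw_code,
   ldl_code and ldq_code) that go through the TLB using the dedicated
   instruction-fetch access type. */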
#define DATA_SIZE 1
#include "softmmu_header.h"

#define DATA_SIZE 2
#include "softmmu_header.h"

#define DATA_SIZE 4
#include "softmmu_header.h"

#define DATA_SIZE 8
#include "softmmu_header.h"

#undef ACCESS_TYPE
#undef MEMSUFFIX
#undef env

#endif

#if defined(CONFIG_USER_ONLY)
static inline target_ulong get_phys_addr_code(CPUState *env1, target_ulong addr)
{
    return addr;
}
#else
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
static inline target_ulong get_phys_addr_code(CPUState *env1, target_ulong addr) |
315 |
{ |
316 |
int mmu_idx, page_index, pd;
|
317 |
void *p;
|
318 |
|
319 |
page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
|
320 |
mmu_idx = cpu_mmu_index(env1); |
321 |
if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
|
322 |
(addr & TARGET_PAGE_MASK))) { |
323 |
ldub_code(addr); |
324 |
} |
325 |
pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK; |
326 |
if (pd > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
|
327 |
#if defined(TARGET_SPARC) || defined(TARGET_MIPS)
|
328 |
do_unassigned_access(addr, 0, 1, 0, 4); |
329 |
#else
|
330 |
cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr); |
331 |
#endif
|
332 |
} |
333 |
p = (void *)(unsigned long)addr |
334 |
+ env1->tlb_table[mmu_idx][page_index].addend; |
335 |
return qemu_ram_addr_from_host(p);
|
336 |
} |
337 |
|
338 |
/* Deterministic execution requires that IO only be performed on the last
|
339 |
instruction of a TB so that interrupts take effect immediately. */
|
340 |
static inline int can_do_io(CPUState *env) |
341 |
{ |
342 |
if (!use_icount)
|
343 |
return 1; |
344 |
|
345 |
/* If not executing code then assume we are ok. */
|
346 |
if (!env->current_tb)
|
347 |
return 1; |
348 |
|
349 |
return env->can_do_io != 0; |
350 |
} |
351 |
#endif
|
352 |
|
353 |
typedef void (CPUDebugExcpHandler)(CPUState *env); |
354 |
|
355 |
CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler); |
356 |
|
357 |
/* vl.c */
|
358 |
extern int singlestep; |
359 |
|
360 |
#endif
|