/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _EXEC_ALL_H_
#define _EXEC_ALL_H_

#include "qemu-common.h"

/* Allow translation results to be inspected - the slowdown should be
   negligible, so it is left enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#else
typedef ram_addr_t tb_page_addr_t;
#endif

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */
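
/* A minimal sketch (illustrative only, not part of this header) of how a
   target front end typically drives these values; the DisasContext shape
   and the disas_insn() decoder are hypothetical:

       dc->is_jmp = DISAS_NEXT;
       while (dc->is_jmp == DISAS_NEXT && !end_of_page(dc)) {
           disas_insn(env, dc);        /- translate one guest instruction -/
       }
       /- DISAS_TB_JUMP: static target, the TB can be chained directly;
          DISAS_JUMP/DISAS_UPDATE: pc/state set dynamically, so control
          must return to the main execution loop. -/
*/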

struct TranslationBlock;
typedef struct TranslationBlock TranslationBlock;

/* XXX: make a safe guess about sizes */
#define MAX_OP_PER_INSTR 208

#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 5
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments).  */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
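
/* Worked example with the defaults above: N = MAX_OPC_PARAM_ARGS = 5 + 1 = 6,
   giving MAX_OPC_PARAM = 4 + 2 * 6 = 16 on a 32-bit host and
   4 + 1 * 6 = 10 on a 64-bit host. */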
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

/* Maximum size a TCG op can expand to.  This is complicated because a
   single op may require several host instructions and register reloads.
   For now take a wild guess at 192 bytes, which should allow at least
   a couple of fixup instructions per argument.  */
#define TCG_MAX_OP_SIZE 192

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)

#include "qemu/log.h"

void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          int pc_pos);

void cpu_gen_init(void);
int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
                 int *gen_code_size_ptr);
bool cpu_restore_state(CPUArchState *env, uintptr_t searched_pc);

void QEMU_NORETURN cpu_resume_from_signal(CPUArchState *env1, void *puc);
void QEMU_NORETURN cpu_io_recompile(CPUArchState *env, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base, int flags,
                              int cflags);
void cpu_exec_init(CPUArchState *env);
void QEMU_NORETURN cpu_loop_exit(CPUArchState *env1);
int page_unprotect(target_ulong address, uintptr_t pc, void *puc);
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access);
#if !defined(CONFIG_USER_ONLY)
/* cputlb.c */
void tlb_flush_page(CPUArchState *env, target_ulong addr);
void tlb_flush(CPUArchState *env, int flush_global);
void tlb_set_page(CPUArchState *env, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(hwaddr addr);
#else
static inline void tlb_flush_page(CPUArchState *env, target_ulong addr)
{
}

static inline void tlb_flush(CPUArchState *env, int flush_global)
{
}
#endif

#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */

#define CODE_GEN_PHYS_HASH_BITS 15
#define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS)

/* estimated block size for TB allocation */
/* XXX: use a per-code average code fragment size and modulate it
   according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#if defined(__arm__) || defined(_ARCH_PPC) \
    || defined(__x86_64__) || defined(__i386__) \
    || defined(__sparc__) \
    || defined(CONFIG_TCG_INTERPRETER)
#define USE_DIRECT_JUMP
#endif

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint64_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x7fff
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */

    uint8_t *tc_ptr;    /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
#else
    uintptr_t tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
    uint32_t icount;
};
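
/* Illustrative walk of the circular jump list described above, mirroring
   the way QEMU iterates it elsewhere (e.g. when resetting jumps); "tb" is
   the list head:

       TranslationBlock *tb1 = tb->jmp_first;
       for (;;) {
           unsigned n1 = (uintptr_t)tb1 & 3;
           TranslationBlock *p = (TranslationBlock *)((uintptr_t)tb1 & ~3);
           if (n1 == 2) {
               break;              /- tag 2: back at the list head -/
           }
           /- p reaches tb through its direct-jump slot n1 -/
           tb1 = p->jmp_next[n1];
       }
*/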

static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}

static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
            | (tmp & TB_JMP_ADDR_MASK));
}
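
/* tb_jmp_cache_hash_page() yields the index of the first jump-cache entry
   for pc's page, while tb_jmp_cache_hash_func() ors low address bits into
   that page part; all entries for one guest page therefore occupy
   TB_JMP_PAGE_SIZE contiguous slots, which keeps per-page invalidation a
   simple clear of that range. */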

static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
{
    return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1);
}

void tb_free(TranslationBlock *tb);
void tb_flush(CPUArchState *env);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);

extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
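
/* Sketch of a lookup in the physical hash table, simplified from the slow
   TB-lookup path in cpu-exec.c (the real code also matches page_addr[]):

       tb_page_addr_t phys_pc = get_page_addr_code(env, pc);
       unsigned int h = tb_phys_hash_func(phys_pc);
       TranslationBlock *tb;
       for (tb = tb_phys_hash[h]; tb != NULL; tb = tb->phys_hash_next) {
           if (tb->pc == pc && tb->cs_base == cs_base && tb->flags == flags) {
               break;              /- found a matching translation -/
           }
       }
*/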

#if defined(USE_DIRECT_JUMP)

#if defined(CONFIG_TCG_INTERPRETER)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(_ARCH_PPC)
void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
#elif defined(__i386__) || defined(__x86_64__)
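/* On x86, jmp_addr points at the rel32 operand of a 5-byte "jmp rel32"
   (opcode 0xE9); the displacement is taken relative to the end of the
   operand, hence the "jmp_addr + 4" below.  x86 instruction fetch is
   coherent with data stores, so no explicit icache flush is needed. */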
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(__arm__)
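/* The ARM B instruction encodes a signed 24-bit word offset in its low bits,
   taken relative to pc + 8; patching therefore rewrites only those 24 bits
   with (addr - (jmp_addr + 8)) >> 2. */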
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
#if !QEMU_GNUC_PREREQ(4, 1)
    register unsigned long _beg __asm ("a1");
    register unsigned long _end __asm ("a2");
    register unsigned long _flg __asm ("a3");
#endif

    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
    *(uint32_t *)jmp_addr =
        (*(uint32_t *)jmp_addr & ~0xffffff)
        | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);

#if QEMU_GNUC_PREREQ(4, 1)
    __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
#else
    /* flush icache */
    _beg = jmp_addr;
    _end = jmp_addr + 4;
    _flg = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
#endif
}
#elif defined(__sparc__)
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
#else
#error tb_set_jmp_target1 is missing
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    uint16_t offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    tb->tb_next[n] = addr;
}

#endif

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
    }
}
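
/* Typical call site, simplified from the TB-chaining logic in cpu-exec.c:
   once the next TB ("tb") has been found and it does not span two guest
   pages, the exiting TB and its exit slot (encoded in the low bits of
   next_tb) are patched to jump straight to it:

       if (next_tb != 0 && tb->page_addr[1] == (tb_page_addr_t)-1) {
           tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
       }
*/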

#include "exec/spinlock.h"

extern spinlock_t tb_lock;

extern int tb_invalidated_flag;

/* The return address may point to the start of the next instruction.
   Subtracting one gets us the call instruction itself.  */
#if defined(CONFIG_TCG_INTERPRETER)
/* Softmmu, Alpha, MIPS, SH4 and SPARC user mode emulations call GETPC().
   For all others, GETPC remains undefined (which makes TCI a little
   faster). */
# if defined(CONFIG_SOFTMMU) || \
    defined(TARGET_ALPHA) || defined(TARGET_MIPS) || \
    defined(TARGET_SH4) || defined(TARGET_SPARC)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
# endif
#elif defined(__s390__) && !defined(__s390x__)
# define GETPC() \
    (((uintptr_t)__builtin_return_address(0) & 0x7fffffffUL) - 1)
#elif defined(__arm__)
/* Thumb return addresses have the low bit set, so we need to subtract two.
   This is still safe in ARM mode because instructions are 4 bytes.  */
# define GETPC() ((uintptr_t)__builtin_return_address(0) - 2)
#else
# define GETPC() ((uintptr_t)__builtin_return_address(0) - 1)
#endif
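
/* Illustrative use from a softmmu store helper (the helper itself is
   hypothetical; tlb_fill() is declared later in this header): the adjusted
   return address identifies the guest instruction so CPU state can be
   restored before a fault is raised:

       void helper_example_st(CPUArchState *env, target_ulong addr, int mmu_idx)
       {
           uintptr_t retaddr = GETPC();
           ...
           tlb_fill(env, addr, 1, mmu_idx, retaddr);
       }
*/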

#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
/* The qemu_ld/st optimization splits code generation into a fast and a slow
   path; an MMU helper called from the slow path therefore needs special
   handling to obtain the fast path's pc without any additional argument.
   The trick used is to embed the fast path pc into the slow path.

   Code flow in slow path:
   (1) pre-process
   (2) call MMU helper
   (3) jump to (5)
   (4) fast path information (implementation specific)
   (5) post-process (e.g. stack adjust)
   (6) jump to the code following the fast path
 */
# if defined(__i386__) || defined(__x86_64__)
/* To avoid breaking disassembly, a long jmp is used to embed the fast path
   pc; its destination is the code following the fast path, though this jmp
   is never executed.

   call MMU helper
   jmp POST_PROC (2byte)    <- GETRA()
   jmp NEXT_CODE (5byte)
   POST_PROCESS ...         <- GETRA() + 7
 */
# define GETRA() ((uintptr_t)__builtin_return_address(0))
# define GETPC_LDST() ((uintptr_t)(GETRA() + 7 + \
                                   *(int32_t *)((void *)GETRA() + 3) - 1))
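
/* Decoding the expression above: GETRA() points at the 2-byte short jmp, so
   the 5-byte "jmp rel32" starts at GETRA() + 2 with its rel32 operand at
   GETRA() + 3.  Its target is (GETRA() + 2 + 5) + rel32 = GETRA() + 7 + rel32,
   i.e. the code following the fast path; subtracting 1 then lands inside the
   fast path's last instruction, matching the GETPC() convention. */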
# elif defined(_ARCH_PPC) && !defined(_ARCH_PPC64)
# define GETRA() ((uintptr_t)__builtin_return_address(0))
# define GETPC_LDST() ((uintptr_t) ((*(int32_t *)(GETRA() - 4)) - 1))
# else
# error "CONFIG_QEMU_LDST_OPTIMIZATION needs GETPC_LDST() implementation!"
# endif
bool is_tcg_gen_code(uintptr_t pc_ptr);
# define GETPC_EXT() (is_tcg_gen_code(GETRA()) ? GETPC_LDST() : GETPC())
#else
# define GETPC_EXT() GETPC()
#endif

#if !defined(CONFIG_USER_ONLY)

struct MemoryRegion *iotlb_to_region(hwaddr index);
uint64_t io_mem_read(struct MemoryRegion *mr, hwaddr addr,
                     unsigned size);
void io_mem_write(struct MemoryRegion *mr, hwaddr addr,
                  uint64_t value, unsigned size);

void tlb_fill(CPUArchState *env1, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr);

#include "exec/softmmu_defs.h"

#define ACCESS_TYPE (NB_MMU_MODES + 1)
#define MEMSUFFIX _code
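
/* softmmu_header.h acts as a template: each DATA_SIZE inclusion below is
   expected to stamp out a code-fetch load accessor of that width, its name
   carrying the _code suffix set by MEMSUFFIX. */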

#define DATA_SIZE 1
#include "exec/softmmu_header.h"

#define DATA_SIZE 2
#include "exec/softmmu_header.h"

#define DATA_SIZE 4
#include "exec/softmmu_header.h"

#define DATA_SIZE 8
#include "exec/softmmu_header.h"

#undef ACCESS_TYPE
#undef MEMSUFFIX

#endif

#if defined(CONFIG_USER_ONLY)
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
#endif

typedef void (CPUDebugExcpHandler)(CPUArchState *env);

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);

/* vl.c */
extern int singlestep;

/* cpu-exec.c */
extern volatile sig_atomic_t exit_request;

/* Deterministic execution requires that IO only be performed on the last
   instruction of a TB so that interrupts take effect immediately.  */
static inline int can_do_io(CPUArchState *env)
{
    if (!use_icount) {
        return 1;
    }
    /* If not executing code then assume we are ok.  */
    if (!env->current_tb) {
        return 1;
    }
    return env->can_do_io != 0;
}

#endif