exec.h @ facc68be
/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/* allow seeing the translation results - the slowdown should be
   negligible, so we leave it enabled */
#define DEBUG_DISAS

#ifndef glue
#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)
#define stringify(s) tostring(s)
#define tostring(s) #s
#endif
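
/* Illustrative example (not part of the original header): glue()
   pastes tokens after expanding its arguments, stringify() expands and
   then stringifies, e.g.
       glue(op_, movl)          ->  op_movl   (hypothetical op name)
       stringify(OPC_BUF_SIZE)  ->  "512"
   The two-level xglue()/tostring() indirection is what forces the
   extra round of macro expansion. */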

#if GCC_MAJOR < 3
#define __builtin_expect(x, n) (x)
#endif

#ifdef __i386__
#define REGPARM(n) __attribute((regparm(n)))
#else
#define REGPARM(n)
#endif

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */

struct TranslationBlock;

/* XXX: make a safe guess about the sizes */
#define MAX_OP_PER_INSTR 32
#define OPC_BUF_SIZE 512
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * 3)

extern uint16_t gen_opc_buf[OPC_BUF_SIZE];
extern uint32_t gen_opparam_buf[OPPARAM_BUF_SIZE];
extern uint32_t gen_opc_pc[OPC_BUF_SIZE];
extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
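
/* Sketch (an assumption based on the dyngen design of this era, not
   code from this header): gen_opc_buf holds a stream of micro-op
   indexes and gen_opparam_buf their parameters, consumed in lockstep:

       const uint16_t *opc = gen_opc_buf;
       const uint32_t *opparam = gen_opparam_buf;
       int op;
       while ((op = *opc++) != 0) {      // 0 assumed to mark end of stream
           opparam += op_nb_args(op);    // hypothetical per-op argument count
       }

   gen_opc_pc, gen_opc_cc_op and gen_opc_instr_start record per-op
   target state so cpu_restore_state() can map a host PC back to a
   guest PC. */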

#if defined(TARGET_I386)

void optimize_flags_init(void);

#endif

extern FILE *logfile;
extern int loglevel;

int gen_intermediate_code(CPUState *env, struct TranslationBlock *tb);
int gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb);
void dump_ops(const uint16_t *opc_buf, const uint32_t *opparam_buf);
int cpu_gen_code(CPUState *env, struct TranslationBlock *tb,
                 int max_code_size, int *gen_code_size_ptr);
int cpu_restore_state(struct TranslationBlock *tb,
                      CPUState *env, unsigned long searched_pc);
void cpu_exec_init(void);
int page_unprotect(unsigned long address);
void page_unmap(void);
void tlb_flush_page(CPUState *env, uint32_t addr);
void tlb_flush(CPUState *env);

#define CODE_GEN_MAX_SIZE        65536
#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

#define CODE_GEN_HASH_BITS     15
#define CODE_GEN_HASH_SIZE     (1 << CODE_GEN_HASH_BITS)

/* maximum total translated code allocated */
#define CODE_GEN_BUFFER_SIZE     (2048 * 1024)
//#define CODE_GEN_BUFFER_SIZE     (128 * 1024)

#if defined(__powerpc__)
#define USE_DIRECT_JUMP
#endif

typedef struct TranslationBlock {
    unsigned long pc;      /* simulated PC corresponding to this block (EIP + CS base) */
    unsigned long cs_base; /* CS base for this block */
    unsigned int flags;    /* flags defining in which context the code was generated */
    uint16_t size;         /* size of target code for this block (1 <=
                              size <= TARGET_PAGE_SIZE) */
    uint8_t *tc_ptr;       /* pointer to the translated code */
    struct TranslationBlock *hash_next;    /* next matching block */
    struct TranslationBlock *page_next[2]; /* next blocks in even/odd page */
    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[2];  /* offset of jump instruction */
#else
    uint32_t tb_next[2];        /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
} TranslationBlock;
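
/* Sketch (illustration, not part of the original header): decoding the
   tagged pointers of the jmp list above. Each link stores, in its two
   low bits, the index of the field to follow next:

       TranslationBlock *ptr = some_tb->jmp_first;
       for (;;) {
           int n = (long)ptr & 3;
           TranslationBlock *tb1 = (TranslationBlock *)((long)ptr & ~3);
           if (n == 2)
               break;                  // tag 2 = jmp_first: wrapped around
           ptr = tb1->jmp_next[n];     // follow jmp_next[0] or jmp_next[1]
       }
*/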

static inline unsigned int tb_hash_func(unsigned long pc)
{
    return pc & (CODE_GEN_HASH_SIZE - 1);
}

TranslationBlock *tb_alloc(unsigned long pc);
void tb_flush(void);
void tb_link(TranslationBlock *tb);

extern TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];

extern uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
extern uint8_t *code_gen_ptr;

/* find a translation block in the translation cache. If not found,
   return NULL and store a pointer to the last element of the list
   in *pptb so the caller can chain a new block there */
static inline TranslationBlock *tb_find(TranslationBlock ***pptb,
                                        unsigned long pc,
                                        unsigned long cs_base,
                                        unsigned int flags)
{
    TranslationBlock **ptb, *tb;
    unsigned int h;

    h = tb_hash_func(pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb = *ptb;
        if (!tb)
            break;
        if (tb->pc == pc && tb->cs_base == cs_base && tb->flags == flags)
            return tb;
        ptb = &tb->hash_next;
    }
    *pptb = ptb;
    return NULL;
}
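
/* Usage sketch (an illustration; the real lookup in this revision is
   done by the CPU execution loop): on a miss, tb_find() hands back the
   tail of the hash chain so a freshly translated block can be chained
   without walking the list again:

       TranslationBlock **ptb, *tb;
       tb = tb_find(&ptb, pc, cs_base, flags);
       if (!tb) {
           tb = tb_alloc(pc);
           // ... fill in cs_base/flags and run cpu_gen_code() ...
           *ptb = tb;              // link where the search stopped
           tb->hash_next = NULL;
           tb_link(tb);
       }
*/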

#if defined(__powerpc__)

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    uint32_t val, *ptr;
    unsigned long offset;

    offset = (unsigned long)(tb->tc_ptr + tb->tb_jmp_offset[n]);

    /* patch the branch destination */
    ptr = (uint32_t *)offset;
    val = *ptr;
    val = (val & ~0x03fffffc) | ((addr - offset) & 0x03fffffc);
    *ptr = val;
    /* flush icache */
    asm volatile ("dcbst 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("icbi 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("isync" : : : "memory");
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    tb->tb_next[n] = addr;
}

#endif
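
/* Note on the PowerPC variant above: an I-form "b" instruction keeps
   its signed displacement, relative to the branch's own address, in
   the bits covered by 0x03fffffc - hence the (addr - offset) term -
   and the dcbst/sync/icbi/sync/isync sequence flushes the patched word
   from the data cache and invalidates the stale instruction cache line
   before it can be fetched. */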

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (unsigned long)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((long)(tb) | (n));
    }
}

TranslationBlock *tb_find_pc(unsigned long pc_ptr);

#ifndef offsetof
#define offsetof(type, field) ((size_t) &((type *)0)->field)
#endif

#if defined(__powerpc__)

/* on PowerPC we patch the jump instruction directly */
#define JUMP_TB(opname, tbparam, n, eip)\
do {\
    asm volatile (".section \".data\"\n"\
                  "__op_label" #n "." stringify(opname) ":\n"\
                  ".long 1f\n"\
                  ".previous\n"\
                  "b __op_jmp" #n "\n"\
                  "1:\n");\
    T0 = (long)(tbparam) + (n);\
    EIP = eip;\
    EXIT_TB();\
} while (0)

#else

/* jump to next block operations (more portable code, does not need
   cache flushing, but slower because of indirect jump) */
#define JUMP_TB(opname, tbparam, n, eip)\
do {\
    static void __attribute__((unused)) *__op_label ## n = &&label ## n;\
    static void __attribute__((unused)) *dummy ## n = &&dummy_label ## n;\
    goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n]);\
label ## n:\
    T0 = (long)(tbparam) + (n);\
    EIP = eip;\
dummy_label ## n:\
    EXIT_TB();\
} while (0)

#endif
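
/* Note (an assumption about how the generated code uses JUMP_TB):
   __op_label##n publishes the address of a point inside the compiled
   op so the chaining code knows where the default jump target lives.
   In the portable variant, while the TB is unchained the indirect goto
   lands back on label##n and execution falls through to set T0/EIP and
   leave via EXIT_TB(); once tb_add_jump() has patched tb_next[n] (or,
   on PowerPC, the branch instruction itself), the same exit transfers
   straight into the next block's code. */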

/* physical memory access */
#define IO_MEM_NB_ENTRIES  256
#define TLB_INVALID_MASK   (1 << 3)
#define IO_MEM_SHIFT       4
#define IO_MEM_UNASSIGNED  (1 << IO_MEM_SHIFT)

unsigned long physpage_find(unsigned long page);

extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
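
/* Host spin lock primitives. The contract assumed by spin_lock() and
   spin_trylock() below: testandset() atomically marks the lock word
   held and returns 0 iff it was previously free. */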
#ifdef __powerpc__
static inline int testandset (int *p)
{
    int ret;
    __asm__ __volatile__ (
                          "0: lwarx %0,0,%1 ;"
                          "   xor. %0,%3,%0;"
                          "   bne 1f;"
                          "   stwcx. %2,0,%1;"
                          "   bne- 0b;"
                          "1: "
                          : "=&r" (ret)
                          : "r" (p), "r" (1), "r" (0)
                          : "cr0", "memory");
    return ret;
}
#endif

#ifdef __i386__
static inline int testandset (int *p)
{
    char ret;
    long int readval;

    /* cmpxchg sets ZF iff the exchange happened, so setne yields a
       non-zero result iff the lock was already held, which is the
       polarity spin_lock() and spin_trylock() expect */
    __asm__ __volatile__ ("lock; cmpxchgl %3, %1; setne %0"
                          : "=q" (ret), "=m" (*p), "=a" (readval)
                          : "r" (1), "m" (*p), "a" (0)
                          : "memory");
    return ret;
}
#endif

#ifdef __s390__
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ ("0: cs %0,%1,0(%2)\n"
                          "   jl 0b"
                          : "=&d" (ret)
                          : "r" (1), "a" (p), "0" (*p)
                          : "cc", "memory" );
    return ret;
}
#endif

#ifdef __alpha__
static inline int testandset (int *p)
{
    int ret;
    unsigned long one;

    __asm__ __volatile__ ("0: mov 1,%2\n"
                          "   ldl_l %0,%1\n"
                          "   stl_c %2,%1\n"
                          "   beq %2,1f\n"
                          ".subsection 2\n"
                          "1: br 0b\n"
                          ".previous"
                          : "=r" (ret), "=m" (*p), "=r" (one)
                          : "m" (*p));
    return ret;
}
#endif

#ifdef __sparc__
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__("ldstub [%1], %0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory");

    return (ret ? 1 : 0);
}
#endif

#ifdef __arm__
static inline int testandset (int *spinlock)
{
    register unsigned int ret;
    __asm__ __volatile__("swp %0, %1, [%2]"
                         : "=r"(ret)
                         : "0"(1), "r"(spinlock));

    return ret;
}
#endif

#ifdef __mc68000
static inline int testandset (int *p)
{
    char ret;
    /* tas sets the condition codes from the tested lock byte, so sne
       makes ret non-zero iff the lock was already held; the memory
       operand must be the lock word itself, not the pointer variable */
    __asm__ __volatile__("tas %1; sne %0"
                         : "=r" (ret), "+m" (*p)
                         :
                         : "cc", "memory");
    return ret;
}
#endif

typedef int spinlock_t;

#define SPIN_LOCK_UNLOCKED 0

#if 1
static inline void spin_lock(spinlock_t *lock)
{
    while (testandset(lock));
}

static inline void spin_unlock(spinlock_t *lock)
{
    *lock = 0;
}

static inline int spin_trylock(spinlock_t *lock)
{
    return !testandset(lock);
}
#else
static inline void spin_lock(spinlock_t *lock)
{
}

static inline void spin_unlock(spinlock_t *lock)
{
}

static inline int spin_trylock(spinlock_t *lock)
{
    return 1;
}
#endif

extern spinlock_t tb_lock;
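
/* Usage sketch (illustration only): tb_lock serializes access to the
   translated-code structures, e.g.

       spin_lock(&tb_lock);
       // ... look up, allocate or chain a TranslationBlock ...
       spin_unlock(&tb_lock);

   with the lock word initialized to SPIN_LOCK_UNLOCKED. */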