root / exec-all.h @ 89fc88da
History | View | Annotate | Download (19 kB)
1 |
/*
|
---|---|
2 |
* internal execution defines for qemu
|
3 |
*
|
4 |
* Copyright (c) 2003 Fabrice Bellard
|
5 |
*
|
6 |
* This library is free software; you can redistribute it and/or
|
7 |
* modify it under the terms of the GNU Lesser General Public
|
8 |
* License as published by the Free Software Foundation; either
|
9 |
* version 2 of the License, or (at your option) any later version.
|
10 |
*
|
11 |
* This library is distributed in the hope that it will be useful,
|
12 |
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 |
* Lesser General Public License for more details.
|
15 |
*
|
16 |
* You should have received a copy of the GNU Lesser General Public
|
17 |
* License along with this library; if not, write to the Free Software
|
18 |
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
19 |
*/
|
20 |
|
21 |
/* allow to see translation results - the slowdown should be negligible, so we leave it */
|
22 |
#define DEBUG_DISAS
|
23 |
|
24 |
#ifndef glue
|
25 |
#define xglue(x, y) x ## y |
26 |
#define glue(x, y) xglue(x, y)
|
27 |
#define stringify(s) tostring(s)
|
28 |
#define tostring(s) #s |
29 |
#endif
|
30 |
|
31 |
#ifndef likely
|
32 |
#if __GNUC__ < 3 |
33 |
#define __builtin_expect(x, n) (x)
|
34 |
#endif
|
35 |
|
36 |
#define likely(x) __builtin_expect(!!(x), 1) |
37 |
#define unlikely(x) __builtin_expect(!!(x), 0) |
38 |
#endif
|
39 |
|
40 |
#ifndef always_inline
|
41 |
#if (__GNUC__ < 3) || defined(__APPLE__) |
42 |
#define always_inline inline |
43 |
#else
|
44 |
#define always_inline __attribute__ (( always_inline )) inline |
45 |
#endif
|
46 |
#endif
|
47 |
|
48 |
#ifdef __i386__
|
49 |
#define REGPARM(n) __attribute((regparm(n)))
|
50 |
#else
|
51 |
#define REGPARM(n)
|
52 |
#endif
|
53 |
|
54 |
/* is_jmp field values */
|
55 |
#define DISAS_NEXT 0 /* next instruction can be analyzed */ |
56 |
#define DISAS_JUMP 1 /* only pc was modified dynamically */ |
57 |
#define DISAS_UPDATE 2 /* cpu state was modified dynamically */ |
58 |
#define DISAS_TB_JUMP 3 /* only pc was modified statically */ |
59 |
|
60 |
struct TranslationBlock;
|
61 |
|
62 |
/* XXX: make safe guess about sizes */
|
63 |
#define MAX_OP_PER_INSTR 32 |
64 |
#define OPC_BUF_SIZE 512 |
65 |
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
|
66 |
|
67 |
#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * 3) |
68 |
|
69 |
extern uint16_t gen_opc_buf[OPC_BUF_SIZE];
|
70 |
extern uint32_t gen_opparam_buf[OPPARAM_BUF_SIZE];
|
71 |
extern long gen_labels[OPC_BUF_SIZE]; |
72 |
extern int nb_gen_labels; |
73 |
extern target_ulong gen_opc_pc[OPC_BUF_SIZE];
|
74 |
extern target_ulong gen_opc_npc[OPC_BUF_SIZE];
|
75 |
extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
|
76 |
extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
|
77 |
extern target_ulong gen_opc_jump_pc[2]; |
78 |
extern uint32_t gen_opc_hflags[OPC_BUF_SIZE];
|
79 |
|
80 |
typedef void (GenOpFunc)(void); |
81 |
typedef void (GenOpFunc1)(long); |
82 |
typedef void (GenOpFunc2)(long, long); |
83 |
typedef void (GenOpFunc3)(long, long, long); |
84 |
|
85 |
#if defined(TARGET_I386)
|
86 |
|
87 |
void optimize_flags_init(void); |
88 |
|
89 |
#endif
|
90 |
|
91 |
extern FILE *logfile;
|
92 |
extern int loglevel; |
93 |
|
94 |
void muls64(int64_t *phigh, int64_t *plow, int64_t a, int64_t b);
|
95 |
void mulu64(uint64_t *phigh, uint64_t *plow, uint64_t a, uint64_t b);
|
96 |
|
97 |
int gen_intermediate_code(CPUState *env, struct TranslationBlock *tb); |
98 |
int gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb); |
99 |
void dump_ops(const uint16_t *opc_buf, const uint32_t *opparam_buf); |
100 |
int cpu_gen_code(CPUState *env, struct TranslationBlock *tb, |
101 |
int max_code_size, int *gen_code_size_ptr); |
102 |
int cpu_restore_state(struct TranslationBlock *tb, |
103 |
CPUState *env, unsigned long searched_pc, |
104 |
void *puc);
|
105 |
int cpu_gen_code_copy(CPUState *env, struct TranslationBlock *tb, |
106 |
int max_code_size, int *gen_code_size_ptr); |
107 |
int cpu_restore_state_copy(struct TranslationBlock *tb, |
108 |
CPUState *env, unsigned long searched_pc, |
109 |
void *puc);
|
110 |
void cpu_resume_from_signal(CPUState *env1, void *puc); |
111 |
void cpu_exec_init(CPUState *env);
|
112 |
int page_unprotect(target_ulong address, unsigned long pc, void *puc); |
113 |
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
|
114 |
int is_cpu_write_access);
|
115 |
void tb_invalidate_page_range(target_ulong start, target_ulong end);
|
116 |
void tlb_flush_page(CPUState *env, target_ulong addr);
|
117 |
void tlb_flush(CPUState *env, int flush_global); |
118 |
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
|
119 |
target_phys_addr_t paddr, int prot,
|
120 |
int is_user, int is_softmmu); |
121 |
static inline int tlb_set_page(CPUState *env, target_ulong vaddr, |
122 |
target_phys_addr_t paddr, int prot,
|
123 |
int is_user, int is_softmmu) |
124 |
{ |
125 |
if (prot & PAGE_READ)
|
126 |
prot |= PAGE_EXEC; |
127 |
return tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
|
128 |
} |
129 |
|
130 |
#define CODE_GEN_MAX_SIZE 65536 |
131 |
#define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */ |
132 |
|
133 |
#define CODE_GEN_PHYS_HASH_BITS 15 |
134 |
#define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS) |
135 |
|
136 |
/* maximum total translate dcode allocated */
|
137 |
|
138 |
/* NOTE: the translated code area cannot be too big because on some
|
139 |
archs the range of "fast" function calls is limited. Here is a
|
140 |
summary of the ranges:
|
141 |
|
142 |
i386 : signed 32 bits
|
143 |
arm : signed 26 bits
|
144 |
ppc : signed 24 bits
|
145 |
sparc : signed 32 bits
|
146 |
alpha : signed 23 bits
|
147 |
*/
|
148 |
|
149 |
#if defined(__alpha__)
|
150 |
#define CODE_GEN_BUFFER_SIZE (2 * 1024 * 1024) |
151 |
#elif defined(__ia64)
|
152 |
#define CODE_GEN_BUFFER_SIZE (4 * 1024 * 1024) /* range of addl */ |
153 |
#elif defined(__powerpc__)
|
154 |
#define CODE_GEN_BUFFER_SIZE (6 * 1024 * 1024) |
155 |
#else
|
156 |
#define CODE_GEN_BUFFER_SIZE (16 * 1024 * 1024) |
157 |
#endif
|
158 |
|
159 |
//#define CODE_GEN_BUFFER_SIZE (128 * 1024)
|
160 |
|
161 |
/* estimated block size for TB allocation */
|
162 |
/* XXX: use a per code average code fragment size and modulate it
|
163 |
according to the host CPU */
|
164 |
#if defined(CONFIG_SOFTMMU)
|
165 |
#define CODE_GEN_AVG_BLOCK_SIZE 128 |
166 |
#else
|
167 |
#define CODE_GEN_AVG_BLOCK_SIZE 64 |
168 |
#endif
|
169 |
|
170 |
#define CODE_GEN_MAX_BLOCKS (CODE_GEN_BUFFER_SIZE / CODE_GEN_AVG_BLOCK_SIZE)
|
171 |
|
172 |
#if defined(__powerpc__)
|
173 |
#define USE_DIRECT_JUMP
|
174 |
#endif
|
175 |
#if defined(__i386__) && !defined(_WIN32)
|
176 |
#define USE_DIRECT_JUMP
|
177 |
#endif
|
178 |
|
179 |
typedef struct TranslationBlock { |
180 |
target_ulong pc; /* simulated PC corresponding to this block (EIP + CS base) */
|
181 |
target_ulong cs_base; /* CS base for this block */
|
182 |
uint64_t flags; /* flags defining in which context the code was generated */
|
183 |
uint16_t size; /* size of target code for this block (1 <=
|
184 |
size <= TARGET_PAGE_SIZE) */
|
185 |
uint16_t cflags; /* compile flags */
|
186 |
#define CF_CODE_COPY 0x0001 /* block was generated in code copy mode */ |
187 |
#define CF_TB_FP_USED 0x0002 /* fp ops are used in the TB */ |
188 |
#define CF_FP_USED 0x0004 /* fp ops are used in the TB or in a chained TB */ |
189 |
#define CF_SINGLE_INSN 0x0008 /* compile only a single instruction */ |
190 |
|
191 |
uint8_t *tc_ptr; /* pointer to the translated code */
|
192 |
/* next matching tb for physical address. */
|
193 |
struct TranslationBlock *phys_hash_next;
|
194 |
/* first and second physical page containing code. The lower bit
|
195 |
of the pointer tells the index in page_next[] */
|
196 |
struct TranslationBlock *page_next[2]; |
197 |
target_ulong page_addr[2];
|
198 |
|
199 |
/* the following data are used to directly call another TB from
|
200 |
the code of this one. */
|
201 |
uint16_t tb_next_offset[2]; /* offset of original jump target */ |
202 |
#ifdef USE_DIRECT_JUMP
|
203 |
uint16_t tb_jmp_offset[4]; /* offset of jump instruction */ |
204 |
#else
|
205 |
uint32_t tb_next[2]; /* address of jump generated code */ |
206 |
#endif
|
207 |
/* list of TBs jumping to this one. This is a circular list using
|
208 |
the two least significant bits of the pointers to tell what is
|
209 |
the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
|
210 |
jmp_first */
|
211 |
struct TranslationBlock *jmp_next[2]; |
212 |
struct TranslationBlock *jmp_first;
|
213 |
} TranslationBlock; |
214 |
|
215 |
static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc) |
216 |
{ |
217 |
target_ulong tmp; |
218 |
tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)); |
219 |
return (tmp >> TB_JMP_PAGE_BITS) & TB_JMP_PAGE_MASK;
|
220 |
} |
221 |
|
222 |
static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc) |
223 |
{ |
224 |
target_ulong tmp; |
225 |
tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)); |
226 |
return (((tmp >> TB_JMP_PAGE_BITS) & TB_JMP_PAGE_MASK) |
|
227 |
(tmp & TB_JMP_ADDR_MASK)); |
228 |
} |
229 |
|
230 |
static inline unsigned int tb_phys_hash_func(unsigned long pc) |
231 |
{ |
232 |
return pc & (CODE_GEN_PHYS_HASH_SIZE - 1); |
233 |
} |
234 |
|
235 |
TranslationBlock *tb_alloc(target_ulong pc); |
236 |
void tb_flush(CPUState *env);
|
237 |
void tb_link_phys(TranslationBlock *tb,
|
238 |
target_ulong phys_pc, target_ulong phys_page2); |
239 |
|
240 |
extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
|
241 |
|
242 |
extern uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
|
243 |
extern uint8_t *code_gen_ptr;
|
244 |
|
245 |
#if defined(USE_DIRECT_JUMP)
|
246 |
|
247 |
#if defined(__powerpc__)
|
248 |
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr) |
249 |
{ |
250 |
uint32_t val, *ptr; |
251 |
|
252 |
/* patch the branch destination */
|
253 |
ptr = (uint32_t *)jmp_addr; |
254 |
val = *ptr; |
255 |
val = (val & ~0x03fffffc) | ((addr - jmp_addr) & 0x03fffffc); |
256 |
*ptr = val; |
257 |
/* flush icache */
|
258 |
asm volatile ("dcbst 0,%0" : : "r"(ptr) : "memory"); |
259 |
asm volatile ("sync" : : : "memory"); |
260 |
asm volatile ("icbi 0,%0" : : "r"(ptr) : "memory"); |
261 |
asm volatile ("sync" : : : "memory"); |
262 |
asm volatile ("isync" : : : "memory"); |
263 |
} |
264 |
#elif defined(__i386__)
|
265 |
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr) |
266 |
{ |
267 |
/* patch the branch destination */
|
268 |
*(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
|
269 |
/* no need to flush icache explicitely */
|
270 |
} |
271 |
#endif
|
272 |
|
273 |
static inline void tb_set_jmp_target(TranslationBlock *tb, |
274 |
int n, unsigned long addr) |
275 |
{ |
276 |
unsigned long offset; |
277 |
|
278 |
offset = tb->tb_jmp_offset[n]; |
279 |
tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr); |
280 |
offset = tb->tb_jmp_offset[n + 2];
|
281 |
if (offset != 0xffff) |
282 |
tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr); |
283 |
} |
284 |
|
285 |
#else
|
286 |
|
287 |
/* set the jump target */
|
288 |
static inline void tb_set_jmp_target(TranslationBlock *tb, |
289 |
int n, unsigned long addr) |
290 |
{ |
291 |
tb->tb_next[n] = addr; |
292 |
} |
293 |
|
294 |
#endif
|
295 |
|
296 |
static inline void tb_add_jump(TranslationBlock *tb, int n, |
297 |
TranslationBlock *tb_next) |
298 |
{ |
299 |
/* NOTE: this test is only needed for thread safety */
|
300 |
if (!tb->jmp_next[n]) {
|
301 |
/* patch the native jump address */
|
302 |
tb_set_jmp_target(tb, n, (unsigned long)tb_next->tc_ptr); |
303 |
|
304 |
/* add in TB jmp circular list */
|
305 |
tb->jmp_next[n] = tb_next->jmp_first; |
306 |
tb_next->jmp_first = (TranslationBlock *)((long)(tb) | (n));
|
307 |
} |
308 |
} |
309 |
|
310 |
TranslationBlock *tb_find_pc(unsigned long pc_ptr); |
311 |
|
312 |
#ifndef offsetof
|
313 |
#define offsetof(type, field) ((size_t) &((type *)0)->field) |
314 |
#endif
|
315 |
|
316 |
#if defined(_WIN32)
|
317 |
#define ASM_DATA_SECTION ".section \".data\"\n" |
318 |
#define ASM_PREVIOUS_SECTION ".section .text\n" |
319 |
#elif defined(__APPLE__)
|
320 |
#define ASM_DATA_SECTION ".data\n" |
321 |
#define ASM_PREVIOUS_SECTION ".text\n" |
322 |
#else
|
323 |
#define ASM_DATA_SECTION ".section \".data\"\n" |
324 |
#define ASM_PREVIOUS_SECTION ".previous\n" |
325 |
#endif
|
326 |
|
327 |
#define ASM_OP_LABEL_NAME(n, opname) \
|
328 |
ASM_NAME(__op_label) #n "." ASM_NAME(opname) |
329 |
|
330 |
#if defined(__powerpc__)
|
331 |
|
332 |
/* we patch the jump instruction directly */
|
333 |
#define GOTO_TB(opname, tbparam, n)\
|
334 |
do {\
|
335 |
asm volatile (ASM_DATA_SECTION\ |
336 |
ASM_OP_LABEL_NAME(n, opname) ":\n"\
|
337 |
".long 1f\n"\
|
338 |
ASM_PREVIOUS_SECTION \ |
339 |
"b " ASM_NAME(__op_jmp) #n "\n"\ |
340 |
"1:\n");\
|
341 |
} while (0) |
342 |
|
343 |
#elif defined(__i386__) && defined(USE_DIRECT_JUMP)
|
344 |
|
345 |
/* we patch the jump instruction directly */
|
346 |
#define GOTO_TB(opname, tbparam, n)\
|
347 |
do {\
|
348 |
asm volatile (".section .data\n"\ |
349 |
ASM_OP_LABEL_NAME(n, opname) ":\n"\
|
350 |
".long 1f\n"\
|
351 |
ASM_PREVIOUS_SECTION \ |
352 |
"jmp " ASM_NAME(__op_jmp) #n "\n"\ |
353 |
"1:\n");\
|
354 |
} while (0) |
355 |
|
356 |
#elif defined(__s390__)
|
357 |
/* GCC spills R13, so we have to restore it before branching away */
|
358 |
|
359 |
#define GOTO_TB(opname, tbparam, n)\
|
360 |
do {\
|
361 |
static void __attribute__((used)) *dummy ## n = &&dummy_label ## n;\ |
362 |
static void __attribute__((used)) *__op_label ## n \ |
363 |
__asm__(ASM_OP_LABEL_NAME(n, opname)) = &&label ## n;\ |
364 |
__asm__ __volatile__ ( \ |
365 |
"l %%r13,52(%%r15)\n" \
|
366 |
"br %0\n" \
|
367 |
: : "r" (((TranslationBlock*)tbparam)->tb_next[n]));\
|
368 |
\ |
369 |
for(;*((int*)0);); /* just to keep GCC busy */ \ |
370 |
label ## n: ;\ |
371 |
dummy_label ## n: ;\ |
372 |
} while(0) |
373 |
|
374 |
#else
|
375 |
|
376 |
/* jump to next block operations (more portable code, does not need
|
377 |
cache flushing, but slower because of indirect jump) */
|
378 |
#define GOTO_TB(opname, tbparam, n)\
|
379 |
do {\
|
380 |
static void __attribute__((used)) *dummy ## n = &&dummy_label ## n;\ |
381 |
static void __attribute__((used)) *__op_label ## n \ |
382 |
__asm__(ASM_OP_LABEL_NAME(n, opname)) = &&label ## n;\ |
383 |
goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n]);\ |
384 |
label ## n: ;\ |
385 |
dummy_label ## n: ;\ |
386 |
} while (0) |
387 |
|
388 |
#endif
|
389 |
|
390 |
extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4]; |
391 |
extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4]; |
392 |
extern void *io_mem_opaque[IO_MEM_NB_ENTRIES]; |
393 |
|
394 |
#if defined(__powerpc__)
|
395 |
static inline int testandset (int *p) |
396 |
{ |
397 |
int ret;
|
398 |
__asm__ __volatile__ ( |
399 |
"0: lwarx %0,0,%1\n"
|
400 |
" xor. %0,%3,%0\n"
|
401 |
" bne 1f\n"
|
402 |
" stwcx. %2,0,%1\n"
|
403 |
" bne- 0b\n"
|
404 |
"1: "
|
405 |
: "=&r" (ret)
|
406 |
: "r" (p), "r" (1), "r" (0) |
407 |
: "cr0", "memory"); |
408 |
return ret;
|
409 |
} |
410 |
#elif defined(__i386__)
|
411 |
static inline int testandset (int *p) |
412 |
{ |
413 |
long int readval = 0; |
414 |
|
415 |
__asm__ __volatile__ ("lock; cmpxchgl %2, %0"
|
416 |
: "+m" (*p), "+a" (readval) |
417 |
: "r" (1) |
418 |
: "cc");
|
419 |
return readval;
|
420 |
} |
421 |
#elif defined(__x86_64__)
|
422 |
static inline int testandset (int *p) |
423 |
{ |
424 |
long int readval = 0; |
425 |
|
426 |
__asm__ __volatile__ ("lock; cmpxchgl %2, %0"
|
427 |
: "+m" (*p), "+a" (readval) |
428 |
: "r" (1) |
429 |
: "cc");
|
430 |
return readval;
|
431 |
} |
432 |
#elif defined(__s390__)
|
433 |
static inline int testandset (int *p) |
434 |
{ |
435 |
int ret;
|
436 |
|
437 |
__asm__ __volatile__ ("0: cs %0,%1,0(%2)\n"
|
438 |
" jl 0b"
|
439 |
: "=&d" (ret)
|
440 |
: "r" (1), "a" (p), "0" (*p) |
441 |
: "cc", "memory" ); |
442 |
return ret;
|
443 |
} |
444 |
#elif defined(__alpha__)
|
445 |
static inline int testandset (int *p) |
446 |
{ |
447 |
int ret;
|
448 |
unsigned long one; |
449 |
|
450 |
__asm__ __volatile__ ("0: mov 1,%2\n"
|
451 |
" ldl_l %0,%1\n"
|
452 |
" stl_c %2,%1\n"
|
453 |
" beq %2,1f\n"
|
454 |
".subsection 2\n"
|
455 |
"1: br 0b\n"
|
456 |
".previous"
|
457 |
: "=r" (ret), "=m" (*p), "=r" (one) |
458 |
: "m" (*p));
|
459 |
return ret;
|
460 |
} |
461 |
#elif defined(__sparc__)
|
462 |
static inline int testandset (int *p) |
463 |
{ |
464 |
int ret;
|
465 |
|
466 |
__asm__ __volatile__("ldstub [%1], %0"
|
467 |
: "=r" (ret)
|
468 |
: "r" (p)
|
469 |
: "memory");
|
470 |
|
471 |
return (ret ? 1 : 0); |
472 |
} |
473 |
#elif defined(__arm__)
|
474 |
static inline int testandset (int *spinlock) |
475 |
{ |
476 |
register unsigned int ret; |
477 |
__asm__ __volatile__("swp %0, %1, [%2]"
|
478 |
: "=r"(ret)
|
479 |
: "0"(1), "r"(spinlock)); |
480 |
|
481 |
return ret;
|
482 |
} |
483 |
#elif defined(__mc68000)
|
484 |
static inline int testandset (int *p) |
485 |
{ |
486 |
char ret;
|
487 |
__asm__ __volatile__("tas %1; sne %0"
|
488 |
: "=r" (ret)
|
489 |
: "m" (p)
|
490 |
: "cc","memory"); |
491 |
return ret;
|
492 |
} |
493 |
#elif defined(__ia64)
|
494 |
|
495 |
#include <ia64intrin.h> |
496 |
|
497 |
static inline int testandset (int *p) |
498 |
{ |
499 |
return __sync_lock_test_and_set (p, 1); |
500 |
} |
501 |
#elif defined(__mips__)
|
502 |
static inline int testandset (int *p) |
503 |
{ |
504 |
int ret;
|
505 |
|
506 |
__asm__ __volatile__ ( |
507 |
" .set push \n"
|
508 |
" .set noat \n"
|
509 |
" .set mips2 \n"
|
510 |
"1: li $1, 1 \n"
|
511 |
" ll %0, %1 \n"
|
512 |
" sc $1, %1 \n"
|
513 |
" beqz $1, 1b \n"
|
514 |
" .set pop "
|
515 |
: "=r" (ret), "+R" (*p) |
516 |
: |
517 |
: "memory");
|
518 |
|
519 |
return ret;
|
520 |
} |
521 |
#else
|
522 |
#error unimplemented CPU support
|
523 |
#endif
|
524 |
|
525 |
typedef int spinlock_t; |
526 |
|
527 |
#define SPIN_LOCK_UNLOCKED 0 |
528 |
|
529 |
#if defined(CONFIG_USER_ONLY)
|
530 |
static inline void spin_lock(spinlock_t *lock) |
531 |
{ |
532 |
while (testandset(lock));
|
533 |
} |
534 |
|
535 |
static inline void spin_unlock(spinlock_t *lock) |
536 |
{ |
537 |
*lock = 0;
|
538 |
} |
539 |
|
540 |
static inline int spin_trylock(spinlock_t *lock) |
541 |
{ |
542 |
return !testandset(lock);
|
543 |
} |
544 |
#else
|
545 |
static inline void spin_lock(spinlock_t *lock) |
546 |
{ |
547 |
} |
548 |
|
549 |
static inline void spin_unlock(spinlock_t *lock) |
550 |
{ |
551 |
} |
552 |
|
553 |
static inline int spin_trylock(spinlock_t *lock) |
554 |
{ |
555 |
return 1; |
556 |
} |
557 |
#endif
|
558 |
|
559 |
extern spinlock_t tb_lock;
|
560 |
|
561 |
extern int tb_invalidated_flag; |
562 |
|
563 |
#if !defined(CONFIG_USER_ONLY)
|
564 |
|
565 |
void tlb_fill(target_ulong addr, int is_write, int is_user, |
566 |
void *retaddr);
|
567 |
|
568 |
#define ACCESS_TYPE 3 |
569 |
#define MEMSUFFIX _code
|
570 |
#define env cpu_single_env
|
571 |
|
572 |
#define DATA_SIZE 1 |
573 |
#include "softmmu_header.h" |
574 |
|
575 |
#define DATA_SIZE 2 |
576 |
#include "softmmu_header.h" |
577 |
|
578 |
#define DATA_SIZE 4 |
579 |
#include "softmmu_header.h" |
580 |
|
581 |
#define DATA_SIZE 8 |
582 |
#include "softmmu_header.h" |
583 |
|
584 |
#undef ACCESS_TYPE
|
585 |
#undef MEMSUFFIX
|
586 |
#undef env
|
587 |
|
588 |
#endif
|
589 |
|
590 |
#if defined(CONFIG_USER_ONLY)
|
591 |
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr) |
592 |
{ |
593 |
return addr;
|
594 |
} |
595 |
#else
|
596 |
/* NOTE: this function can trigger an exception */
|
597 |
/* NOTE2: the returned address is not exactly the physical address: it
|
598 |
is the offset relative to phys_ram_base */
|
599 |
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr) |
600 |
{ |
601 |
int is_user, index, pd;
|
602 |
|
603 |
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
|
604 |
#if defined(TARGET_I386)
|
605 |
is_user = ((env->hflags & HF_CPL_MASK) == 3);
|
606 |
#elif defined (TARGET_PPC)
|
607 |
is_user = msr_pr; |
608 |
#elif defined (TARGET_MIPS)
|
609 |
is_user = ((env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM); |
610 |
#elif defined (TARGET_SPARC)
|
611 |
is_user = (env->psrs == 0);
|
612 |
#elif defined (TARGET_ARM)
|
613 |
is_user = ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR); |
614 |
#elif defined (TARGET_SH4)
|
615 |
is_user = ((env->sr & SR_MD) == 0);
|
616 |
#elif defined (TARGET_ALPHA)
|
617 |
is_user = ((env->ps >> 3) & 3); |
618 |
#elif defined (TARGET_M68K)
|
619 |
is_user = ((env->sr & SR_S) == 0);
|
620 |
#elif defined (TARGET_CRIS)
|
621 |
is_user = (0);
|
622 |
#else
|
623 |
#error unimplemented CPU
|
624 |
#endif
|
625 |
if (__builtin_expect(env->tlb_table[is_user][index].addr_code !=
|
626 |
(addr & TARGET_PAGE_MASK), 0)) {
|
627 |
ldub_code(addr); |
628 |
} |
629 |
pd = env->tlb_table[is_user][index].addr_code & ~TARGET_PAGE_MASK; |
630 |
if (pd > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
|
631 |
#ifdef TARGET_SPARC
|
632 |
do_unassigned_access(addr, 0, 1, 0); |
633 |
#else
|
634 |
cpu_abort(env, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr); |
635 |
#endif
|
636 |
} |
637 |
return addr + env->tlb_table[is_user][index].addend - (unsigned long)phys_ram_base; |
638 |
} |
639 |
#endif
|
640 |
|
641 |
#ifdef USE_KQEMU
|
642 |
#define KQEMU_MODIFY_PAGE_MASK (0xff & ~(VGA_DIRTY_FLAG | CODE_DIRTY_FLAG)) |
643 |
|
644 |
int kqemu_init(CPUState *env);
|
645 |
int kqemu_cpu_exec(CPUState *env);
|
646 |
void kqemu_flush_page(CPUState *env, target_ulong addr);
|
647 |
void kqemu_flush(CPUState *env, int global); |
648 |
void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr);
|
649 |
void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr);
|
650 |
void kqemu_cpu_interrupt(CPUState *env);
|
651 |
void kqemu_record_dump(void); |
652 |
|
653 |
static inline int kqemu_is_ok(CPUState *env) |
654 |
{ |
655 |
return(env->kqemu_enabled &&
|
656 |
(env->cr[0] & CR0_PE_MASK) &&
|
657 |
!(env->hflags & HF_INHIBIT_IRQ_MASK) && |
658 |
(env->eflags & IF_MASK) && |
659 |
!(env->eflags & VM_MASK) && |
660 |
(env->kqemu_enabled == 2 ||
|
661 |
((env->hflags & HF_CPL_MASK) == 3 &&
|
662 |
(env->eflags & IOPL_MASK) != IOPL_MASK))); |
663 |
} |
664 |
|
665 |
#endif
|