/*
 * defines common to all virtual CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

#if defined(__arm__) || defined(__sparc__) || defined(__mips__)
#define WORDS_ALIGNED
#endif

/* some important defines:
 *
 * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
 * memory accesses.
 *
 * WORDS_BIGENDIAN : if defined, the host cpu is big endian and
 * otherwise little endian.
 *
 * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
 *
 * TARGET_WORDS_BIGENDIAN : same for target cpu
 */

#include "bswap.h"

#if defined(WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif

#ifdef BSWAP_NEEDED

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif

#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
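
/* Editorial example (not part of the original header): a minimal sketch of how
   the tswap helpers are meant to be used.  They convert a value between host
   and target byte order; on a same-endianness build they compile away to
   nothing, on a cross-endian build (BSWAP_NEEDED) they byte-swap:

       uint32_t v = 0x11223344;
       uint32_t host_v = tswap32(v);   // 0x44332211 only when BSWAP_NEEDED
       tswap32s(&v);                   // in-place variant
       target_ulong r = tswapl(x);     // width follows TARGET_LONG_SIZE
*/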

/* NOTE: arm FPA is horrible as double 32 bit words are stored in big
   endian ! */
typedef union {
    float64 d;
#if defined(WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upper;
        uint32_t lower;
    } l;
#else
    struct {
        uint32_t lower;
        uint32_t upper;
    } l;
#endif
    uint64_t ll;
} CPU_DoubleU;
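
/* Editorial example (not part of the original header): CPU_DoubleU lets code
   view one float64 as two 32-bit halves or one 64-bit integer without casts,
   with "upper"/"lower" laid out to match the host's (or FPA's) word order:

       CPU_DoubleU u;
       u.d = 1.0;                 // IEEE 754: u.ll == 0x3ff0000000000000
       uint32_t hi = u.l.upper;   // 0x3ff00000 regardless of host endianness
       uint32_t lo = u.l.lower;   // 0x00000000
*/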

/* CPU memory access without any memory or io remapping */

/*
 * the generic syntax for the memory accesses is:
 *
 * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
 *
 * store: st{type}{size}{endian}_{access_type}(ptr, val)
 *
 * type is:
 * (empty): integer access
 *   f    : float access
 *
 * sign is:
 * (empty): for floats or 32 bit size
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * endian is:
 * (empty): target cpu endianness or 8 bit access
 *   r    : reversed target cpu endianness (not implemented yet)
 *   be   : big endian (not implemented yet)
 *   le   : little endian (not implemented yet)
 *
 * access_type is:
 *   raw    : host memory access
 *   user   : user mode access using soft MMU
 *   kernel : kernel mode access using soft MMU
 */
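
/* Editorial example (not part of the original header): decoding a few names
   under the scheme above.  The "_p" helpers defined below take a host pointer
   and are the building blocks the raw/user/kernel forms are built on:

       ldub_p(p)       - load unsigned byte from host pointer p
       ldsw_le_p(p)    - load signed 16-bit little-endian value
       stl_be_p(p, v)  - store 32-bit value v in big-endian byte order
       ldq_raw(p)      - load 64-bit value in target byte order via laddr()
       stw_kernel(p,v) - kernel-mode soft-MMU store (user-only builds alias raw)
*/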
static inline int ldub_p(void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb_p(void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb_p(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}

/* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the
   kernel handles unaligned load/stores may give better results, but
   it is a system wide setting : bad */
#if defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

/* conservative code for little endian unaligned accesses */
static inline int lduw_le_p(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}

static inline int ldsw_le_p(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}

static inline int ldl_le_p(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}

static inline uint64_t ldq_le_p(void *ptr)
{
    uint8_t *p = ptr;
    uint32_t v1, v2;
    v1 = ldl_le_p(p);
    v2 = ldl_le_p(p + 4);
    return v1 | ((uint64_t)v2 << 32);
}

static inline void stw_le_p(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}

static inline void stl_le_p(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    uint8_t *p = ptr;
    stl_le_p(p, (uint32_t)v);
    stl_le_p(p + 4, v >> 32);
}

/* float access */

static inline float32 ldfl_le_p(void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_le_p(ptr);
    return u.f;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_le_p(ptr, u.i);
}

static inline float64 ldfq_le_p(void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_le_p(ptr);
    u.l.upper = ldl_le_p(ptr + 4);
    return u.d;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_le_p(ptr, u.l.lower);
    stl_le_p(ptr + 4, u.l.upper);
}

#else

static inline int lduw_le_p(void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_le_p(void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_le_p(void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_le_p(void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_le_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_le_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_le_p(void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_le_p(void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}
#endif

#if !defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

static inline int lduw_be_p(void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return val;
#else
    uint8_t *b = (uint8_t *) ptr;
    return ((b[0] << 8) | b[1]);
#endif
}

static inline int ldsw_be_p(void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return (int16_t)val;
#else
    uint8_t *b = (uint8_t *) ptr;
    return (int16_t)((b[0] << 8) | b[1]);
#endif
}

static inline int ldl_be_p(void *ptr)
{
#if defined(__i386__) || defined(__x86_64__)
    int val;
    asm volatile ("movl %1, %0\n"
                  "bswap %0\n"
                  : "=r" (val)
                  : "m" (*(uint32_t *)ptr));
    return val;
#else
    uint8_t *b = (uint8_t *) ptr;
    return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
#endif
}

static inline uint64_t ldq_be_p(void *ptr)
{
    uint32_t a, b;
    a = ldl_be_p(ptr);
    b = ldl_be_p(ptr + 4);
    return (((uint64_t)a << 32) | b);
}

static inline void stw_be_p(void *ptr, int v)
{
#if defined(__i386__)
    asm volatile ("xchgb %b0, %h0\n"
                  "movw %w0, %1\n"
                  : "=q" (v)
                  : "m" (*(uint16_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 8;
    d[1] = v;
#endif
}

static inline void stl_be_p(void *ptr, int v)
{
#if defined(__i386__) || defined(__x86_64__)
    asm volatile ("bswap %0\n"
                  "movl %0, %1\n"
                  : "=r" (v)
                  : "m" (*(uint32_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 24;
    d[1] = v >> 16;
    d[2] = v >> 8;
    d[3] = v;
#endif
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    stl_be_p(ptr, v >> 32);
    stl_be_p(ptr + 4, v);
}

/* float access */

static inline float32 ldfl_be_p(void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_be_p(ptr);
    return u.f;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_be_p(ptr, u.i);
}

static inline float64 ldfq_be_p(void *ptr)
{
    CPU_DoubleU u;
    u.l.upper = ldl_be_p(ptr);
    u.l.lower = ldl_be_p(ptr + 4);
    return u.d;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_be_p(ptr, u.l.upper);
    stl_be_p(ptr + 4, u.l.lower);
}

#else

static inline int lduw_be_p(void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_be_p(void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_be_p(void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_be_p(void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_be_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_be_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_be_p(void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_be_p(void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}

#endif

/* target CPU memory access functions */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define ldfl_p(p) ldfl_be_p(p)
#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define ldfl_p(p) ldfl_le_p(p)
#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#endif
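
/* Editorial example (not part of the original header): the ld*_p/st*_p macros
   above simply pick the variant matching the target's endianness, so common
   code can stay endian-agnostic.  For a big-endian target (TARGET_WORDS_BIGENDIAN):

       uint8_t buf[4] = { 0x12, 0x34, 0x56, 0x78 };
       int v = ldl_p(buf);        // expands to ldl_be_p(buf) -> 0x12345678
       stw_p(buf, 0xBEEF);        // expands to stw_be_p -> buf[0]=0xBE, buf[1]=0xEF
*/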

/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
 */
//#define GUEST_BASE 0x20000000
#define GUEST_BASE 0

/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
#define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))
#define h2g(x) ((target_ulong)(x - GUEST_BASE))

#define saddr(x) g2h(x)
#define laddr(x) g2h(x)

#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define saddr(x) (uint8_t *)(long)(x)
#define laddr(x) (uint8_t *)(long)(x)
#endif

#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)
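
/* Editorial example (not part of the original header): in a CONFIG_USER_ONLY
   build, guest virtual addresses map to host pointers by adding GUEST_BASE,
   and the *_raw accessors go through laddr()/saddr() so callers can pass a
   guest address directly:

       target_ulong guest_addr = 0x8000;    // hypothetical guest address
       uint32_t v = ldl_raw(guest_addr);    // ldl_p(g2h(0x8000))
       stw_raw(guest_addr + 4, 0x1234);     // stw_p(g2h(0x8004), 0x1234)
*/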


#if defined(CONFIG_USER_ONLY)

/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)

#define ldub_code(p) ldub_raw(p)
#define ldsb_code(p) ldsb_raw(p)
#define lduw_code(p) lduw_raw(p)
#define ldsw_code(p) ldsw_raw(p)
#define ldl_code(p) ldl_raw(p)
#define ldq_code(p) ldq_raw(p)

#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldq_kernel(p) ldq_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
#define stfq_kernel(p, v) stfq_raw(p, v)

#endif /* defined(CONFIG_USER_ONLY) */

/* page related stuff */

#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)

/* ??? These should be the larger of unsigned long and target_ulong. */
extern unsigned long qemu_real_host_page_size;
extern unsigned long qemu_host_page_bits;
extern unsigned long qemu_host_page_size;
extern unsigned long qemu_host_page_mask;

#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
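
/* Editorial example (not part of the original header): with a hypothetical
   TARGET_PAGE_BITS of 12, TARGET_PAGE_SIZE is 4096 and TARGET_PAGE_MASK is
   ~0xfff, so TARGET_PAGE_ALIGN rounds an address up to the next page boundary:

       TARGET_PAGE_ALIGN(0x12345) == 0x13000     // rounded up to the next page
       (0x12345 & TARGET_PAGE_MASK) == 0x12000   // rounded down to the page start
*/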

/* same as PROT_xxx */
#define PAGE_READ      0x0001
#define PAGE_WRITE     0x0002
#define PAGE_EXEC      0x0004
#define PAGE_BITS      (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID     0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010

void page_dump(FILE *f);
int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
void page_unprotect_range(target_ulong data, target_ulong data_size);

CPUState *cpu_copy(CPUState *env);

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags);
void cpu_dump_statistics(CPUState *env, FILE *f,
                         int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                         int flags);

void cpu_abort(CPUState *env, const char *fmt, ...)
    __attribute__ ((__format__ (__printf__, 2, 3)))
    __attribute__ ((__noreturn__));
extern CPUState *first_cpu;
extern CPUState *cpu_single_env;
extern int code_copy_enabled;

#define CPU_INTERRUPT_EXIT   0x01  /* wants exit from main loop */
#define CPU_INTERRUPT_HARD   0x02  /* hardware interrupt pending */
#define CPU_INTERRUPT_EXITTB 0x04  /* exit the current TB (use for x86 a20 case) */
#define CPU_INTERRUPT_TIMER  0x08  /* internal timer exception pending */
#define CPU_INTERRUPT_FIQ    0x10  /* Fast interrupt pending. */
#define CPU_INTERRUPT_HALT   0x20  /* CPU halt wanted */
#define CPU_INTERRUPT_SMI    0x40  /* (x86 only) SMI interrupt pending */
#define CPU_INTERRUPT_DEBUG  0x80  /* Debug event occurred. */
#define CPU_INTERRUPT_VIRQ   0x100 /* virtual interrupt pending. */

void cpu_interrupt(CPUState *s, int mask);
void cpu_reset_interrupt(CPUState *env, int mask);

int cpu_watchpoint_insert(CPUState *env, target_ulong addr);
int cpu_watchpoint_remove(CPUState *env, target_ulong addr);
int cpu_breakpoint_insert(CPUState *env, target_ulong pc);
int cpu_breakpoint_remove(CPUState *env, target_ulong pc);
void cpu_single_step(CPUState *env, int enabled);
void cpu_reset(CPUState *s);

/* Return the physical page corresponding to a virtual one. Use it
   only for debugging because no protection checks are done. Return -1
   if no page found. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr);

#define CPU_LOG_TB_OUT_ASM (1 << 0)
#define CPU_LOG_TB_IN_ASM  (1 << 1)
#define CPU_LOG_TB_OP      (1 << 2)
#define CPU_LOG_TB_OP_OPT  (1 << 3)
#define CPU_LOG_INT        (1 << 4)
#define CPU_LOG_EXEC       (1 << 5)
#define CPU_LOG_PCALL      (1 << 6)
#define CPU_LOG_IOPORT     (1 << 7)
#define CPU_LOG_TB_CPU     (1 << 8)

/* define log items */
typedef struct CPULogItem {
    int mask;
    const char *name;
    const char *help;
} CPULogItem;

extern CPULogItem cpu_log_items[];

void cpu_set_log(int log_flags);
void cpu_set_log_filename(const char *filename);
int cpu_str_to_log_mask(const char *str);

/* IO ports API */

/* NOTE: as these functions may even be used when there is an isa
   bridge on non x86 targets, we always define them */
#ifndef NO_CPU_IO_DEFS
void cpu_outb(CPUState *env, int addr, int val);
void cpu_outw(CPUState *env, int addr, int val);
void cpu_outl(CPUState *env, int addr, int val);
int cpu_inb(CPUState *env, int addr);
int cpu_inw(CPUState *env, int addr);
int cpu_inl(CPUState *env, int addr);
#endif

/* memory API */

extern int phys_ram_size;
extern int phys_ram_fd;
extern uint8_t *phys_ram_base;
extern uint8_t *phys_ram_dirty;

/* physical memory access */
#define TLB_INVALID_MASK   (1 << 3)
#define IO_MEM_SHIFT       4
#define IO_MEM_NB_ENTRIES  (1 << (TARGET_PAGE_BITS - IO_MEM_SHIFT))

#define IO_MEM_RAM         (0 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_ROM         (1 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_UNASSIGNED  (2 << IO_MEM_SHIFT)
#define IO_MEM_NOTDIRTY    (4 << IO_MEM_SHIFT) /* used internally, never use directly */
/* acts like a ROM when read and like a device when written. As an
   exception, the write memory callback gets the ram offset instead of
   the physical address */
#define IO_MEM_ROMD        (1)
#define IO_MEM_SUBPAGE     (2)

typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);

void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset);
uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr);
ram_addr_t qemu_ram_alloc(unsigned int size);
void qemu_ram_free(ram_addr_t addr);
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque);
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index);
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index);
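
/* Editorial sketch (not part of the original header): registering a simple
   MMIO region with the API above.  Assumes the usual convention that the
   mem_read/mem_write tables hold three callbacks indexed by access size
   (0 = byte, 1 = 16-bit, 2 = 32-bit); the device names below are hypothetical.

       static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr) { return 0; }
       static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val) { }

       static CPUReadMemoryFunc *mydev_read[3] = { mydev_readl, mydev_readl, mydev_readl };
       static CPUWriteMemoryFunc *mydev_write[3] = { mydev_writel, mydev_writel, mydev_writel };

       int io = cpu_register_io_memory(0, mydev_read, mydev_write, opaque);
       cpu_register_physical_memory(0xf0000000, 0x1000, io);
*/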

void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write);
static inline void cpu_physical_memory_read(target_phys_addr_t addr,
                                            uint8_t *buf, int len)
{
    cpu_physical_memory_rw(addr, buf, len, 0);
}
static inline void cpu_physical_memory_write(target_phys_addr_t addr,
                                             const uint8_t *buf, int len)
{
    cpu_physical_memory_rw(addr, (uint8_t *)buf, len, 1);
}
uint32_t ldub_phys(target_phys_addr_t addr);
uint32_t lduw_phys(target_phys_addr_t addr);
uint32_t ldl_phys(target_phys_addr_t addr);
uint64_t ldq_phys(target_phys_addr_t addr);
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val);
void stb_phys(target_phys_addr_t addr, uint32_t val);
void stw_phys(target_phys_addr_t addr, uint32_t val);
void stl_phys(target_phys_addr_t addr, uint32_t val);
void stq_phys(target_phys_addr_t addr, uint64_t val);

void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len);
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write);
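
/* Editorial example (not part of the original header): the physical-memory
   helpers take guest physical addresses and route the access through RAM,
   ROM or registered MMIO as appropriate.  Addresses below are hypothetical.

       uint8_t boot[512];
       cpu_physical_memory_read(0x7c00, boot, sizeof(boot));  // copy guest RAM out
       stl_phys(0x1000, 0xdeadbeef);                          // 32-bit store, target byte order
       uint32_t v = ldl_phys(0x1000);                         // reads back 0xdeadbeef
*/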

#define VGA_DIRTY_FLAG  0x01
#define CODE_DIRTY_FLAG 0x02

/* read dirty bit (return 0 or 1) */
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}

static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
                                                int dirty_flags)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
}

static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
}
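
/* Editorial note (not part of the original header): phys_ram_dirty keeps one
   byte per target page; individual bits (VGA_DIRTY_FLAG, CODE_DIRTY_FLAG, ...)
   track dirtiness for different clients, and 0xff means dirty for everyone.
   Typical query, with ram_offset a hypothetical page-aligned RAM offset:

       if (cpu_physical_memory_get_dirty(ram_offset, VGA_DIRTY_FLAG)) {
           // page touched since the display code last reset the VGA bit
       }
*/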

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags);
void cpu_tlb_update_dirty(CPUState *env);

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...));

/*******************************************/
/* host CPU ticks (if available) */

#if defined(__powerpc__)

static inline uint32_t get_tbl(void)
{
    uint32_t tbl;
    asm volatile("mftb %0" : "=r" (tbl));
    return tbl;
}

static inline uint32_t get_tbu(void)
{
    uint32_t tbl;
    asm volatile("mftbu %0" : "=r" (tbl));
    return tbl;
}

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t l, h, h1;
    /* NOTE: we test if wrapping has occurred */
    do {
        h = get_tbu();
        l = get_tbl();
        h1 = get_tbu();
    } while (h != h1);
    return ((int64_t)h << 32) | l;
}

#elif defined(__i386__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("rdtsc" : "=A" (val));
    return val;
}

#elif defined(__x86_64__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t low, high;
    int64_t val;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    val = high;
    val <<= 32;
    val |= low;
    return val;
}

#elif defined(__ia64)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
    return val;
}

#elif defined(__s390__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
    return val;
}

#elif defined(__sparc_v8plus__) || defined(__sparc_v8plusa__) || defined(__sparc_v9__)

static inline int64_t cpu_get_real_ticks(void)
{
#if defined(_LP64)
    uint64_t rval;
    asm volatile("rd %%tick,%0" : "=r"(rval));
    return rval;
#else
    union {
        uint64_t i64;
        struct {
            uint32_t high;
            uint32_t low;
        } i32;
    } rval;
    asm volatile("rd %%tick,%1; srlx %1,32,%0"
                 : "=r"(rval.i32.high), "=r"(rval.i32.low));
    return rval.i64;
#endif
}

#elif defined(__mips__)

static inline int64_t cpu_get_real_ticks(void)
{
#if __mips_isa_rev >= 2
    uint32_t count;
    static uint32_t cyc_per_count = 0;

    if (!cyc_per_count)
        __asm__ __volatile__("rdhwr %0, $3" : "=r" (cyc_per_count));

    __asm__ __volatile__("rdhwr %1, $2" : "=r" (count));
    return (int64_t)(count * cyc_per_count);
#else
    /* FIXME */
    static int64_t ticks = 0;
    return ticks++;
#endif
}

#else
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value.  This will be
   totally wrong, but hopefully better than nothing. */
static inline int64_t cpu_get_real_ticks(void)
{
    static int64_t ticks = 0;
    return ticks++;
}
#endif

/* profiling */
#ifdef CONFIG_PROFILER
static inline int64_t profile_getclock(void)
{
    return cpu_get_real_ticks();
}

extern int64_t kqemu_time, kqemu_time_start;
extern int64_t qemu_time, qemu_time_start;
extern int64_t tlb_flush_time;
extern int64_t kqemu_exec_count;
extern int64_t dev_time;
extern int64_t kqemu_ret_int_count;
extern int64_t kqemu_ret_excp_count;
extern int64_t kqemu_ret_intr_count;

#endif

#endif /* CPU_ALL_H */