root / cpu-all.h @ 1ad2134f
History | View | Annotate | Download (25.7 kB)
1 | 5a9fdfec | bellard | /*
|
---|---|---|---|
2 | 5a9fdfec | bellard | * defines common to all virtual CPUs
|
3 | 5fafdf24 | ths | *
|
4 | 5a9fdfec | bellard | * Copyright (c) 2003 Fabrice Bellard
|
5 | 5a9fdfec | bellard | *
|
6 | 5a9fdfec | bellard | * This library is free software; you can redistribute it and/or
|
7 | 5a9fdfec | bellard | * modify it under the terms of the GNU Lesser General Public
|
8 | 5a9fdfec | bellard | * License as published by the Free Software Foundation; either
|
9 | 5a9fdfec | bellard | * version 2 of the License, or (at your option) any later version.
|
10 | 5a9fdfec | bellard | *
|
11 | 5a9fdfec | bellard | * This library is distributed in the hope that it will be useful,
|
12 | 5a9fdfec | bellard | * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 | 5a9fdfec | bellard | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 | 5a9fdfec | bellard | * Lesser General Public License for more details.
|
15 | 5a9fdfec | bellard | *
|
16 | 5a9fdfec | bellard | * You should have received a copy of the GNU Lesser General Public
|
17 | 5a9fdfec | bellard | * License along with this library; if not, write to the Free Software
|
18 | fad6cb1a | aurel32 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
|
19 | 5a9fdfec | bellard | */
|
20 | 5a9fdfec | bellard | #ifndef CPU_ALL_H
|
21 | 5a9fdfec | bellard | #define CPU_ALL_H
|
22 | 5a9fdfec | bellard | |
23 | 7d99a001 | blueswir1 | #include "qemu-common.h" |
24 | 1ad2134f | Paul Brook | #include "cpu-common.h" |
25 | 0ac4bd56 | bellard | |
/* some important defines:
 *
 * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
 * memory accesses.
 *
 * WORDS_BIGENDIAN : if defined, the host cpu is big endian and
 * otherwise little endian.
 *
 * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
 *
 * TARGET_WORDS_BIGENDIAN : same for target cpu
 */
38 | 0ac4bd56 | bellard | |
39 | 939ef593 | aurel32 | #include "softfloat.h" |
40 | f193c797 | bellard | |
41 | f193c797 | bellard | #if defined(WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
|
42 | f193c797 | bellard | #define BSWAP_NEEDED
|
43 | f193c797 | bellard | #endif
|
44 | f193c797 | bellard | |
#ifdef BSWAP_NEEDED

/* Host and target endianness differ (BSWAP_NEEDED), so converting a
   target-endian value to host order requires a byte swap.  The *s
   variants swap in place. */

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

/* Host and target share endianness: target swaps are identity
   operations and the in-place variants do nothing. */

static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif
|
107 | f193c797 | bellard | |
/* Swap helpers sized to the target "long" (TARGET_LONG_SIZE bytes). */
#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
|
117 | f193c797 | bellard | |
118 | 0ca9d380 | aurel32 | typedef union { |
119 | 0ca9d380 | aurel32 | float32 f; |
120 | 0ca9d380 | aurel32 | uint32_t l; |
121 | 0ca9d380 | aurel32 | } CPU_FloatU; |
122 | 0ca9d380 | aurel32 | |
123 | 832ed0fa | bellard | /* NOTE: arm FPA is horrible as double 32 bit words are stored in big
|
124 | 832ed0fa | bellard | endian ! */
|
125 | 0ac4bd56 | bellard | typedef union { |
126 | 53cd6637 | bellard | float64 d; |
127 | 9d60cac0 | bellard | #if defined(WORDS_BIGENDIAN) \
|
128 | 9d60cac0 | bellard | || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT)) |
129 | 0ac4bd56 | bellard | struct {
|
130 | 0ac4bd56 | bellard | uint32_t upper; |
131 | 832ed0fa | bellard | uint32_t lower; |
132 | 0ac4bd56 | bellard | } l; |
133 | 0ac4bd56 | bellard | #else
|
134 | 0ac4bd56 | bellard | struct {
|
135 | 0ac4bd56 | bellard | uint32_t lower; |
136 | 832ed0fa | bellard | uint32_t upper; |
137 | 0ac4bd56 | bellard | } l; |
138 | 0ac4bd56 | bellard | #endif
|
139 | 0ac4bd56 | bellard | uint64_t ll; |
140 | 0ac4bd56 | bellard | } CPU_DoubleU; |
141 | 0ac4bd56 | bellard | |
#ifdef TARGET_SPARC
/* View a 128-bit float as a float128, as four 32-bit words, or as two
   64-bit halves, ordered according to host float word layout. */
typedef union {
    float128 q;
#if defined(WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upmost;
        uint32_t upper;
        uint32_t lower;
        uint32_t lowest;
    } l;
    struct {
        uint64_t upper;
        uint64_t lower;
    } ll;
#else
    struct {
        uint32_t lowest;
        uint32_t lower;
        uint32_t upper;
        uint32_t upmost;
    } l;
    struct {
        uint64_t lower;
        uint64_t upper;
    } ll;
#endif
} CPU_QuadU;
#endif
|
171 | 1f587329 | blueswir1 | |
/* CPU memory access without any memory or io remapping */

/*
 * the generic syntax for the memory accesses is:
 *
 * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
 *
 * store: st{type}{size}{endian}_{access_type}(ptr, val)
 *
 * type is:
 * (empty): integer access
 *   f    : float access
 *
 * sign is:
 * (empty): for floats or 32 bit size
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * endian is:
 * (empty): target cpu endianness or 8 bit access
 *   r    : reversed target cpu endianness (not implemented yet)
 *   be   : big endian (not implemented yet)
 *   le   : little endian (not implemented yet)
 *
 * access_type is:
 *   raw    : host memory access
 *   user   : user mode access using soft MMU
 *   kernel : kernel mode access using soft MMU
 */
/* Load an unsigned byte from host memory. */
static inline int ldub_p(const void *ptr)
{
    const uint8_t *p = ptr;
    return *p;
}
211 | 5a9fdfec | bellard | |
/* Load a sign-extended byte from host memory. */
static inline int ldsb_p(const void *ptr)
{
    const int8_t *p = ptr;
    return *p;
}
216 | 5a9fdfec | bellard | |
/* Store the low byte of v into host memory. */
static inline void stb_p(void *ptr, int v)
{
    uint8_t *p = ptr;
    *p = v;
}
221 | 5a9fdfec | bellard | |
222 | 5a9fdfec | bellard | /* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the
|
223 | 5a9fdfec | bellard | kernel handles unaligned load/stores may give better results, but
|
224 | 5a9fdfec | bellard | it is a system wide setting : bad */
|
225 | 2df3b95d | bellard | #if defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)
|
226 | 5a9fdfec | bellard | |
227 | 5a9fdfec | bellard | /* conservative code for little endian unaligned accesses */
|
228 | 8bba3ea1 | balrog | static inline int lduw_le_p(const void *ptr) |
229 | 5a9fdfec | bellard | { |
230 | e58ffeb3 | malc | #ifdef _ARCH_PPC
|
231 | 5a9fdfec | bellard | int val;
|
232 | 5a9fdfec | bellard | __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr)); |
233 | 5a9fdfec | bellard | return val;
|
234 | 5a9fdfec | bellard | #else
|
235 | e01fe6d5 | malc | const uint8_t *p = ptr;
|
236 | 5a9fdfec | bellard | return p[0] | (p[1] << 8); |
237 | 5a9fdfec | bellard | #endif
|
238 | 5a9fdfec | bellard | } |
239 | 5a9fdfec | bellard | |
240 | 8bba3ea1 | balrog | static inline int ldsw_le_p(const void *ptr) |
241 | 5a9fdfec | bellard | { |
242 | e58ffeb3 | malc | #ifdef _ARCH_PPC
|
243 | 5a9fdfec | bellard | int val;
|
244 | 5a9fdfec | bellard | __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr)); |
245 | 5a9fdfec | bellard | return (int16_t)val;
|
246 | 5a9fdfec | bellard | #else
|
247 | e01fe6d5 | malc | const uint8_t *p = ptr;
|
248 | 5a9fdfec | bellard | return (int16_t)(p[0] | (p[1] << 8)); |
249 | 5a9fdfec | bellard | #endif
|
250 | 5a9fdfec | bellard | } |
251 | 5a9fdfec | bellard | |
252 | 8bba3ea1 | balrog | static inline int ldl_le_p(const void *ptr) |
253 | 5a9fdfec | bellard | { |
254 | e58ffeb3 | malc | #ifdef _ARCH_PPC
|
255 | 5a9fdfec | bellard | int val;
|
256 | 5a9fdfec | bellard | __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr)); |
257 | 5a9fdfec | bellard | return val;
|
258 | 5a9fdfec | bellard | #else
|
259 | e01fe6d5 | malc | const uint8_t *p = ptr;
|
260 | 5a9fdfec | bellard | return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24); |
261 | 5a9fdfec | bellard | #endif
|
262 | 5a9fdfec | bellard | } |
263 | 5a9fdfec | bellard | |
264 | 8bba3ea1 | balrog | static inline uint64_t ldq_le_p(const void *ptr) |
265 | 5a9fdfec | bellard | { |
266 | e01fe6d5 | malc | const uint8_t *p = ptr;
|
267 | 5a9fdfec | bellard | uint32_t v1, v2; |
268 | f0aca822 | bellard | v1 = ldl_le_p(p); |
269 | f0aca822 | bellard | v2 = ldl_le_p(p + 4);
|
270 | 5a9fdfec | bellard | return v1 | ((uint64_t)v2 << 32); |
271 | 5a9fdfec | bellard | } |
272 | 5a9fdfec | bellard | |
273 | 2df3b95d | bellard | static inline void stw_le_p(void *ptr, int v) |
274 | 5a9fdfec | bellard | { |
275 | e58ffeb3 | malc | #ifdef _ARCH_PPC
|
276 | 5a9fdfec | bellard | __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr)); |
277 | 5a9fdfec | bellard | #else
|
278 | 5a9fdfec | bellard | uint8_t *p = ptr; |
279 | 5a9fdfec | bellard | p[0] = v;
|
280 | 5a9fdfec | bellard | p[1] = v >> 8; |
281 | 5a9fdfec | bellard | #endif
|
282 | 5a9fdfec | bellard | } |
283 | 5a9fdfec | bellard | |
284 | 2df3b95d | bellard | static inline void stl_le_p(void *ptr, int v) |
285 | 5a9fdfec | bellard | { |
286 | e58ffeb3 | malc | #ifdef _ARCH_PPC
|
287 | 5a9fdfec | bellard | __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr)); |
288 | 5a9fdfec | bellard | #else
|
289 | 5a9fdfec | bellard | uint8_t *p = ptr; |
290 | 5a9fdfec | bellard | p[0] = v;
|
291 | 5a9fdfec | bellard | p[1] = v >> 8; |
292 | 5a9fdfec | bellard | p[2] = v >> 16; |
293 | 5a9fdfec | bellard | p[3] = v >> 24; |
294 | 5a9fdfec | bellard | #endif
|
295 | 5a9fdfec | bellard | } |
296 | 5a9fdfec | bellard | |
297 | 2df3b95d | bellard | static inline void stq_le_p(void *ptr, uint64_t v) |
298 | 5a9fdfec | bellard | { |
299 | 5a9fdfec | bellard | uint8_t *p = ptr; |
300 | f0aca822 | bellard | stl_le_p(p, (uint32_t)v); |
301 | f0aca822 | bellard | stl_le_p(p + 4, v >> 32); |
302 | 5a9fdfec | bellard | } |
303 | 5a9fdfec | bellard | |
304 | 5a9fdfec | bellard | /* float access */
|
305 | 5a9fdfec | bellard | |
306 | 8bba3ea1 | balrog | static inline float32 ldfl_le_p(const void *ptr) |
307 | 5a9fdfec | bellard | { |
308 | 5a9fdfec | bellard | union {
|
309 | 53cd6637 | bellard | float32 f; |
310 | 5a9fdfec | bellard | uint32_t i; |
311 | 5a9fdfec | bellard | } u; |
312 | 2df3b95d | bellard | u.i = ldl_le_p(ptr); |
313 | 5a9fdfec | bellard | return u.f;
|
314 | 5a9fdfec | bellard | } |
315 | 5a9fdfec | bellard | |
316 | 2df3b95d | bellard | static inline void stfl_le_p(void *ptr, float32 v) |
317 | 5a9fdfec | bellard | { |
318 | 5a9fdfec | bellard | union {
|
319 | 53cd6637 | bellard | float32 f; |
320 | 5a9fdfec | bellard | uint32_t i; |
321 | 5a9fdfec | bellard | } u; |
322 | 5a9fdfec | bellard | u.f = v; |
323 | 2df3b95d | bellard | stl_le_p(ptr, u.i); |
324 | 5a9fdfec | bellard | } |
325 | 5a9fdfec | bellard | |
326 | 8bba3ea1 | balrog | static inline float64 ldfq_le_p(const void *ptr) |
327 | 5a9fdfec | bellard | { |
328 | 0ac4bd56 | bellard | CPU_DoubleU u; |
329 | 2df3b95d | bellard | u.l.lower = ldl_le_p(ptr); |
330 | 2df3b95d | bellard | u.l.upper = ldl_le_p(ptr + 4);
|
331 | 5a9fdfec | bellard | return u.d;
|
332 | 5a9fdfec | bellard | } |
333 | 5a9fdfec | bellard | |
334 | 2df3b95d | bellard | static inline void stfq_le_p(void *ptr, float64 v) |
335 | 5a9fdfec | bellard | { |
336 | 0ac4bd56 | bellard | CPU_DoubleU u; |
337 | 5a9fdfec | bellard | u.d = v; |
338 | 2df3b95d | bellard | stl_le_p(ptr, u.l.lower); |
339 | 2df3b95d | bellard | stl_le_p(ptr + 4, u.l.upper);
|
340 | 5a9fdfec | bellard | } |
341 | 5a9fdfec | bellard | |
342 | 2df3b95d | bellard | #else
|
343 | 2df3b95d | bellard | |
344 | 8bba3ea1 | balrog | static inline int lduw_le_p(const void *ptr) |
345 | 2df3b95d | bellard | { |
346 | 2df3b95d | bellard | return *(uint16_t *)ptr;
|
347 | 2df3b95d | bellard | } |
348 | 2df3b95d | bellard | |
349 | 8bba3ea1 | balrog | static inline int ldsw_le_p(const void *ptr) |
350 | 2df3b95d | bellard | { |
351 | 2df3b95d | bellard | return *(int16_t *)ptr;
|
352 | 2df3b95d | bellard | } |
353 | 93ac68bc | bellard | |
354 | 8bba3ea1 | balrog | static inline int ldl_le_p(const void *ptr) |
355 | 2df3b95d | bellard | { |
356 | 2df3b95d | bellard | return *(uint32_t *)ptr;
|
357 | 2df3b95d | bellard | } |
358 | 2df3b95d | bellard | |
359 | 8bba3ea1 | balrog | static inline uint64_t ldq_le_p(const void *ptr) |
360 | 2df3b95d | bellard | { |
361 | 2df3b95d | bellard | return *(uint64_t *)ptr;
|
362 | 2df3b95d | bellard | } |
363 | 2df3b95d | bellard | |
364 | 2df3b95d | bellard | static inline void stw_le_p(void *ptr, int v) |
365 | 2df3b95d | bellard | { |
366 | 2df3b95d | bellard | *(uint16_t *)ptr = v; |
367 | 2df3b95d | bellard | } |
368 | 2df3b95d | bellard | |
369 | 2df3b95d | bellard | static inline void stl_le_p(void *ptr, int v) |
370 | 2df3b95d | bellard | { |
371 | 2df3b95d | bellard | *(uint32_t *)ptr = v; |
372 | 2df3b95d | bellard | } |
373 | 2df3b95d | bellard | |
374 | 2df3b95d | bellard | static inline void stq_le_p(void *ptr, uint64_t v) |
375 | 2df3b95d | bellard | { |
376 | 2df3b95d | bellard | *(uint64_t *)ptr = v; |
377 | 2df3b95d | bellard | } |
378 | 2df3b95d | bellard | |
379 | 2df3b95d | bellard | /* float access */
|
380 | 2df3b95d | bellard | |
381 | 8bba3ea1 | balrog | static inline float32 ldfl_le_p(const void *ptr) |
382 | 2df3b95d | bellard | { |
383 | 2df3b95d | bellard | return *(float32 *)ptr;
|
384 | 2df3b95d | bellard | } |
385 | 2df3b95d | bellard | |
386 | 8bba3ea1 | balrog | static inline float64 ldfq_le_p(const void *ptr) |
387 | 2df3b95d | bellard | { |
388 | 2df3b95d | bellard | return *(float64 *)ptr;
|
389 | 2df3b95d | bellard | } |
390 | 2df3b95d | bellard | |
391 | 2df3b95d | bellard | static inline void stfl_le_p(void *ptr, float32 v) |
392 | 2df3b95d | bellard | { |
393 | 2df3b95d | bellard | *(float32 *)ptr = v; |
394 | 2df3b95d | bellard | } |
395 | 2df3b95d | bellard | |
396 | 2df3b95d | bellard | static inline void stfq_le_p(void *ptr, float64 v) |
397 | 2df3b95d | bellard | { |
398 | 2df3b95d | bellard | *(float64 *)ptr = v; |
399 | 2df3b95d | bellard | } |
400 | 2df3b95d | bellard | #endif
|
401 | 2df3b95d | bellard | |
402 | 2df3b95d | bellard | #if !defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)
|
403 | 2df3b95d | bellard | |
404 | 8bba3ea1 | balrog | static inline int lduw_be_p(const void *ptr) |
405 | 93ac68bc | bellard | { |
406 | 83d73968 | bellard | #if defined(__i386__)
|
407 | 83d73968 | bellard | int val;
|
408 | 83d73968 | bellard | asm volatile ("movzwl %1, %0\n" |
409 | 83d73968 | bellard | "xchgb %b0, %h0\n"
|
410 | 83d73968 | bellard | : "=q" (val)
|
411 | 83d73968 | bellard | : "m" (*(uint16_t *)ptr));
|
412 | 83d73968 | bellard | return val;
|
413 | 83d73968 | bellard | #else
|
414 | e01fe6d5 | malc | const uint8_t *b = ptr;
|
415 | 83d73968 | bellard | return ((b[0] << 8) | b[1]); |
416 | 83d73968 | bellard | #endif
|
417 | 93ac68bc | bellard | } |
418 | 93ac68bc | bellard | |
419 | 8bba3ea1 | balrog | static inline int ldsw_be_p(const void *ptr) |
420 | 93ac68bc | bellard | { |
421 | 83d73968 | bellard | #if defined(__i386__)
|
422 | 83d73968 | bellard | int val;
|
423 | 83d73968 | bellard | asm volatile ("movzwl %1, %0\n" |
424 | 83d73968 | bellard | "xchgb %b0, %h0\n"
|
425 | 83d73968 | bellard | : "=q" (val)
|
426 | 83d73968 | bellard | : "m" (*(uint16_t *)ptr));
|
427 | 83d73968 | bellard | return (int16_t)val;
|
428 | 83d73968 | bellard | #else
|
429 | e01fe6d5 | malc | const uint8_t *b = ptr;
|
430 | 83d73968 | bellard | return (int16_t)((b[0] << 8) | b[1]); |
431 | 83d73968 | bellard | #endif
|
432 | 93ac68bc | bellard | } |
433 | 93ac68bc | bellard | |
434 | 8bba3ea1 | balrog | static inline int ldl_be_p(const void *ptr) |
435 | 93ac68bc | bellard | { |
436 | 4f2ac237 | bellard | #if defined(__i386__) || defined(__x86_64__)
|
437 | 83d73968 | bellard | int val;
|
438 | 83d73968 | bellard | asm volatile ("movl %1, %0\n" |
439 | 83d73968 | bellard | "bswap %0\n"
|
440 | 83d73968 | bellard | : "=r" (val)
|
441 | 83d73968 | bellard | : "m" (*(uint32_t *)ptr));
|
442 | 83d73968 | bellard | return val;
|
443 | 83d73968 | bellard | #else
|
444 | e01fe6d5 | malc | const uint8_t *b = ptr;
|
445 | 83d73968 | bellard | return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3]; |
446 | 83d73968 | bellard | #endif
|
447 | 93ac68bc | bellard | } |
448 | 93ac68bc | bellard | |
449 | 8bba3ea1 | balrog | static inline uint64_t ldq_be_p(const void *ptr) |
450 | 93ac68bc | bellard | { |
451 | 93ac68bc | bellard | uint32_t a,b; |
452 | 2df3b95d | bellard | a = ldl_be_p(ptr); |
453 | 4d7a0880 | blueswir1 | b = ldl_be_p((uint8_t *)ptr + 4);
|
454 | 93ac68bc | bellard | return (((uint64_t)a<<32)|b); |
455 | 93ac68bc | bellard | } |
456 | 93ac68bc | bellard | |
457 | 2df3b95d | bellard | static inline void stw_be_p(void *ptr, int v) |
458 | 93ac68bc | bellard | { |
459 | 83d73968 | bellard | #if defined(__i386__)
|
460 | 83d73968 | bellard | asm volatile ("xchgb %b0, %h0\n" |
461 | 83d73968 | bellard | "movw %w0, %1\n"
|
462 | 83d73968 | bellard | : "=q" (v)
|
463 | 83d73968 | bellard | : "m" (*(uint16_t *)ptr), "0" (v)); |
464 | 83d73968 | bellard | #else
|
465 | 93ac68bc | bellard | uint8_t *d = (uint8_t *) ptr; |
466 | 93ac68bc | bellard | d[0] = v >> 8; |
467 | 93ac68bc | bellard | d[1] = v;
|
468 | 83d73968 | bellard | #endif
|
469 | 93ac68bc | bellard | } |
470 | 93ac68bc | bellard | |
471 | 2df3b95d | bellard | static inline void stl_be_p(void *ptr, int v) |
472 | 93ac68bc | bellard | { |
473 | 4f2ac237 | bellard | #if defined(__i386__) || defined(__x86_64__)
|
474 | 83d73968 | bellard | asm volatile ("bswap %0\n" |
475 | 83d73968 | bellard | "movl %0, %1\n"
|
476 | 83d73968 | bellard | : "=r" (v)
|
477 | 83d73968 | bellard | : "m" (*(uint32_t *)ptr), "0" (v)); |
478 | 83d73968 | bellard | #else
|
479 | 93ac68bc | bellard | uint8_t *d = (uint8_t *) ptr; |
480 | 93ac68bc | bellard | d[0] = v >> 24; |
481 | 93ac68bc | bellard | d[1] = v >> 16; |
482 | 93ac68bc | bellard | d[2] = v >> 8; |
483 | 93ac68bc | bellard | d[3] = v;
|
484 | 83d73968 | bellard | #endif
|
485 | 93ac68bc | bellard | } |
486 | 93ac68bc | bellard | |
487 | 2df3b95d | bellard | static inline void stq_be_p(void *ptr, uint64_t v) |
488 | 93ac68bc | bellard | { |
489 | 2df3b95d | bellard | stl_be_p(ptr, v >> 32);
|
490 | 4d7a0880 | blueswir1 | stl_be_p((uint8_t *)ptr + 4, v);
|
491 | 0ac4bd56 | bellard | } |
492 | 0ac4bd56 | bellard | |
493 | 0ac4bd56 | bellard | /* float access */
|
494 | 0ac4bd56 | bellard | |
495 | 8bba3ea1 | balrog | static inline float32 ldfl_be_p(const void *ptr) |
496 | 0ac4bd56 | bellard | { |
497 | 0ac4bd56 | bellard | union {
|
498 | 53cd6637 | bellard | float32 f; |
499 | 0ac4bd56 | bellard | uint32_t i; |
500 | 0ac4bd56 | bellard | } u; |
501 | 2df3b95d | bellard | u.i = ldl_be_p(ptr); |
502 | 0ac4bd56 | bellard | return u.f;
|
503 | 0ac4bd56 | bellard | } |
504 | 0ac4bd56 | bellard | |
505 | 2df3b95d | bellard | static inline void stfl_be_p(void *ptr, float32 v) |
506 | 0ac4bd56 | bellard | { |
507 | 0ac4bd56 | bellard | union {
|
508 | 53cd6637 | bellard | float32 f; |
509 | 0ac4bd56 | bellard | uint32_t i; |
510 | 0ac4bd56 | bellard | } u; |
511 | 0ac4bd56 | bellard | u.f = v; |
512 | 2df3b95d | bellard | stl_be_p(ptr, u.i); |
513 | 0ac4bd56 | bellard | } |
514 | 0ac4bd56 | bellard | |
515 | 8bba3ea1 | balrog | static inline float64 ldfq_be_p(const void *ptr) |
516 | 0ac4bd56 | bellard | { |
517 | 0ac4bd56 | bellard | CPU_DoubleU u; |
518 | 2df3b95d | bellard | u.l.upper = ldl_be_p(ptr); |
519 | 4d7a0880 | blueswir1 | u.l.lower = ldl_be_p((uint8_t *)ptr + 4);
|
520 | 0ac4bd56 | bellard | return u.d;
|
521 | 0ac4bd56 | bellard | } |
522 | 0ac4bd56 | bellard | |
523 | 2df3b95d | bellard | static inline void stfq_be_p(void *ptr, float64 v) |
524 | 0ac4bd56 | bellard | { |
525 | 0ac4bd56 | bellard | CPU_DoubleU u; |
526 | 0ac4bd56 | bellard | u.d = v; |
527 | 2df3b95d | bellard | stl_be_p(ptr, u.l.upper); |
528 | 4d7a0880 | blueswir1 | stl_be_p((uint8_t *)ptr + 4, u.l.lower);
|
529 | 93ac68bc | bellard | } |
530 | 93ac68bc | bellard | |
531 | 5a9fdfec | bellard | #else
|
532 | 5a9fdfec | bellard | |
533 | 8bba3ea1 | balrog | static inline int lduw_be_p(const void *ptr) |
534 | 5a9fdfec | bellard | { |
535 | 5a9fdfec | bellard | return *(uint16_t *)ptr;
|
536 | 5a9fdfec | bellard | } |
537 | 5a9fdfec | bellard | |
538 | 8bba3ea1 | balrog | static inline int ldsw_be_p(const void *ptr) |
539 | 5a9fdfec | bellard | { |
540 | 5a9fdfec | bellard | return *(int16_t *)ptr;
|
541 | 5a9fdfec | bellard | } |
542 | 5a9fdfec | bellard | |
543 | 8bba3ea1 | balrog | static inline int ldl_be_p(const void *ptr) |
544 | 5a9fdfec | bellard | { |
545 | 5a9fdfec | bellard | return *(uint32_t *)ptr;
|
546 | 5a9fdfec | bellard | } |
547 | 5a9fdfec | bellard | |
548 | 8bba3ea1 | balrog | static inline uint64_t ldq_be_p(const void *ptr) |
549 | 5a9fdfec | bellard | { |
550 | 5a9fdfec | bellard | return *(uint64_t *)ptr;
|
551 | 5a9fdfec | bellard | } |
552 | 5a9fdfec | bellard | |
553 | 2df3b95d | bellard | static inline void stw_be_p(void *ptr, int v) |
554 | 5a9fdfec | bellard | { |
555 | 5a9fdfec | bellard | *(uint16_t *)ptr = v; |
556 | 5a9fdfec | bellard | } |
557 | 5a9fdfec | bellard | |
558 | 2df3b95d | bellard | static inline void stl_be_p(void *ptr, int v) |
559 | 5a9fdfec | bellard | { |
560 | 5a9fdfec | bellard | *(uint32_t *)ptr = v; |
561 | 5a9fdfec | bellard | } |
562 | 5a9fdfec | bellard | |
563 | 2df3b95d | bellard | static inline void stq_be_p(void *ptr, uint64_t v) |
564 | 5a9fdfec | bellard | { |
565 | 5a9fdfec | bellard | *(uint64_t *)ptr = v; |
566 | 5a9fdfec | bellard | } |
567 | 5a9fdfec | bellard | |
568 | 5a9fdfec | bellard | /* float access */
|
569 | 5a9fdfec | bellard | |
570 | 8bba3ea1 | balrog | static inline float32 ldfl_be_p(const void *ptr) |
571 | 5a9fdfec | bellard | { |
572 | 53cd6637 | bellard | return *(float32 *)ptr;
|
573 | 5a9fdfec | bellard | } |
574 | 5a9fdfec | bellard | |
575 | 8bba3ea1 | balrog | static inline float64 ldfq_be_p(const void *ptr) |
576 | 5a9fdfec | bellard | { |
577 | 53cd6637 | bellard | return *(float64 *)ptr;
|
578 | 5a9fdfec | bellard | } |
579 | 5a9fdfec | bellard | |
580 | 2df3b95d | bellard | static inline void stfl_be_p(void *ptr, float32 v) |
581 | 5a9fdfec | bellard | { |
582 | 53cd6637 | bellard | *(float32 *)ptr = v; |
583 | 5a9fdfec | bellard | } |
584 | 5a9fdfec | bellard | |
585 | 2df3b95d | bellard | static inline void stfq_be_p(void *ptr, float64 v) |
586 | 5a9fdfec | bellard | { |
587 | 53cd6637 | bellard | *(float64 *)ptr = v; |
588 | 5a9fdfec | bellard | } |
589 | 2df3b95d | bellard | |
590 | 2df3b95d | bellard | #endif
|
591 | 2df3b95d | bellard | |
/* target CPU memory access functions: endian-neutral names resolve to
   the accessor matching the target's byte order. */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define ldfl_p(p) ldfl_be_p(p)
#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define ldfl_p(p) ldfl_le_p(p)
#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#endif
|
618 | 5a9fdfec | bellard | |
/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
#include <assert.h>
#include "qemu-types.h"

/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
 */
//#define GUEST_BASE 0x20000000
#define GUEST_BASE 0

/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
#define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))
#define h2g(x) ({ \
    unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
    /* Check if given address fits target address space */ \
    assert(__ret == (abi_ulong)__ret); \
    (abi_ulong)__ret; \
})
#define h2g_valid(x) ({ \
    unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
    (__guest == (abi_ulong)__guest); \
})

#define saddr(x) g2h(x)
#define laddr(x) g2h(x)

#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define saddr(x) (uint8_t *)(long)(x)
#define laddr(x) (uint8_t *)(long)(x)
#endif

/* "raw" accessors: direct host memory access through laddr/saddr. */
#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)
|
668 | c27004ec | bellard | |
669 | c27004ec | bellard | |
#if defined(CONFIG_USER_ONLY)

/* if user mode, no other memory access functions: the plain, _code
   (instruction fetch) and _kernel access classes all degenerate to the
   raw host accessors defined above. */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)

/* instruction fetch accessors */
#define ldub_code(p) ldub_raw(p)
#define ldsb_code(p) ldsb_raw(p)
#define lduw_code(p) lduw_raw(p)
#define ldsw_code(p) ldsw_raw(p)
#define ldl_code(p) ldl_raw(p)
#define ldq_code(p) ldq_raw(p)

/* kernel-mode accessors */
#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldq_kernel(p) ldq_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
/* Bug fix: the parameter was misspelled "vt" while the expansion used
   "v", so any expansion of stfq_kernel() referenced an undeclared name
   and failed to compile. */
#define stfq_kernel(p, v) stfq_raw(p, v)

#endif /* defined(CONFIG_USER_ONLY) */
711 | 61382a50 | bellard | |
/* page related stuff */

/* Size, mask and round-up-alignment helpers for the target (guest)
   page size, derived from the per-target TARGET_PAGE_BITS. */
#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
/* Parenthesize the whole expansion so the macro is safe inside any
   surrounding expression (macro-hygiene fix; value is unchanged). */
#define TARGET_PAGE_MASK (~(TARGET_PAGE_SIZE - 1))
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)

/* ??? These should be the larger of unsigned long and target_ulong. */
extern unsigned long qemu_real_host_page_size;
extern unsigned long qemu_host_page_bits;
extern unsigned long qemu_host_page_size;
extern unsigned long qemu_host_page_mask;

/* Round (addr) up to a host-page boundary. */
#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
725 | 5a9fdfec | bellard | |
/* Per-page protection/state flags, same values as PROT_xxx */
#define PAGE_READ 0x0001
#define PAGE_WRITE 0x0002
#define PAGE_EXEC 0x0004
#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID 0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010
#define PAGE_RESERVED 0x0020

/* Dump the guest page table/flags to f. */
void page_dump(FILE *f);
/* Query / set the PAGE_* flags of guest pages. */
int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
/* Check that all pages in [start, start+len) have the given flags. */
int page_check_range(target_ulong start, target_ulong len, int flags);
741 | 5a9fdfec | bellard | |
/* One-time init of the execution engine; tb_size is the translation
   buffer size (0 selects the default). */
void cpu_exec_init_all(unsigned long tb_size);
/* Create a copy of a CPU state (used e.g. for fork in user mode). */
CPUState *cpu_copy(CPUState *env);

/* Dump CPU registers / statistics through the given fprintf-like
   callback. */
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags);
void cpu_dump_statistics (CPUState *env, FILE *f,
                          int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                          int flags);

/* Abort emulation with a printf-style message; never returns. */
void QEMU_NORETURN cpu_abort(CPUState *env, const char *fmt, ...)
    __attribute__ ((__format__ (__printf__, 2, 3)));
/* Head of the global CPU list. */
extern CPUState *first_cpu;
/* CPU currently executing (NULL outside cpu_exec). */
extern CPUState *cpu_single_env;
/* Instruction-counting (icount) state. */
extern int64_t qemu_icount;
extern int use_icount;
758 | 5a9fdfec | bellard | |
/* Bits for the interrupt_request word handled by cpu_interrupt(). */
#define CPU_INTERRUPT_HARD 0x02 /* hardware interrupt pending */
#define CPU_INTERRUPT_EXITTB 0x04 /* exit the current TB (use for x86 a20 case) */
#define CPU_INTERRUPT_TIMER 0x08 /* internal timer exception pending */
#define CPU_INTERRUPT_FIQ 0x10 /* Fast interrupt pending. */
#define CPU_INTERRUPT_HALT 0x20 /* CPU halt wanted */
#define CPU_INTERRUPT_SMI 0x40 /* (x86 only) SMI interrupt pending */
#define CPU_INTERRUPT_DEBUG 0x80 /* Debug event occurred. */
#define CPU_INTERRUPT_VIRQ 0x100 /* virtual interrupt pending. */
#define CPU_INTERRUPT_NMI 0x200 /* NMI pending. */

/* Raise / clear the given CPU_INTERRUPT_* bits. */
void cpu_interrupt(CPUState *s, int mask);
void cpu_reset_interrupt(CPUState *env, int mask);

/* Request that the CPU leave its execution loop. */
void cpu_exit(CPUState *s);

/* Nonzero if the CPU has something to do (not idle/halted). */
int qemu_cpu_has_work(CPUState *env);
|
775 | 6a4955a8 | aliguori | |
/* Breakpoint/watchpoint flags */
#define BP_MEM_READ 0x01
#define BP_MEM_WRITE 0x02
#define BP_MEM_ACCESS (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
#define BP_WATCHPOINT_HIT 0x08
#define BP_GDB 0x10 /* set on behalf of the gdb stub */
#define BP_CPU 0x20 /* set by target CPU debug facilities */

/* Insert/remove breakpoints and watchpoints.  The *_insert variants
   optionally return the created object through the out parameter; the
   int-returning functions report 0 on success. */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUState *env, int mask);
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *env, target_ulong addr,
                          target_ulong len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *env, int mask);

#define SSTEP_ENABLE 0x1 /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ 0x2 /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4 /* Do not use timers while single stepping */

/* Enable/disable single-step mode for the CPU. */
void cpu_single_step(CPUState *env, int enabled);
/* Reset the CPU to its power-on state. */
void cpu_reset(CPUState *s);
|
803 | 4c3a88a2 | bellard | |
/* Return the physical page corresponding to a virtual one. Use it
   only for debugging because no protection checks are done. Return -1
   if no page found. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr);

/* Logging categories for cpu_set_log() (bitmask). */
#define CPU_LOG_TB_OUT_ASM (1 << 0)
#define CPU_LOG_TB_IN_ASM (1 << 1)
#define CPU_LOG_TB_OP (1 << 2)
#define CPU_LOG_TB_OP_OPT (1 << 3)
#define CPU_LOG_INT (1 << 4)
#define CPU_LOG_EXEC (1 << 5)
#define CPU_LOG_PCALL (1 << 6)
#define CPU_LOG_IOPORT (1 << 7)
#define CPU_LOG_TB_CPU (1 << 8)
#define CPU_LOG_RESET (1 << 9)
819 | f193c797 | bellard | |
/* define log items */
typedef struct CPULogItem {
    int mask;          /* CPU_LOG_* bit(s) this item controls */
    const char *name;  /* name used on the -d command line */
    const char *help;  /* one-line description for the user */
} CPULogItem;

/* Table of all known log items, terminated elsewhere. */
extern const CPULogItem cpu_log_items[];

/* Select active log categories / output file; parse a comma-separated
   item list into a CPU_LOG_* mask. */
void cpu_set_log(int log_flags);
void cpu_set_log_filename(const char *filename);
int cpu_str_to_log_mask(const char *str);
832 | 34865134 | bellard | |
/* IO ports API */

/* NOTE: as these functions may be even used when there is an isa
   bridge on non x86 targets, we always defined them */
#ifndef NO_CPU_IO_DEFS
void cpu_outb(CPUState *env, int addr, int val);
void cpu_outw(CPUState *env, int addr, int val);
void cpu_outl(CPUState *env, int addr, int val);
int cpu_inb(CPUState *env, int addr);
int cpu_inw(CPUState *env, int addr);
int cpu_inl(CPUState *env, int addr);
#endif
|
845 | 09683d35 | bellard | |
/* memory API */

extern int phys_ram_fd;
/* One byte of dirty-client bits per target page (see *_DIRTY_FLAG). */
extern uint8_t *phys_ram_dirty;
extern ram_addr_t ram_size;
extern ram_addr_t last_ram_offset;

/* physical memory access */

/* MMIO pages are identified by a combination of an IO device index and
   3 flags.  The ROMD code stores the page ram offset in iotlb entry,
   so only a limited number of ids are available. */

#define IO_MEM_NB_ENTRIES (1 << (TARGET_PAGE_BITS - IO_MEM_SHIFT))

/* Flags stored in the low bits of the TLB virtual address.  These are
   defined so that fast path ram access is all zeros. */
/* Zero if TLB entry is valid. */
#define TLB_INVALID_MASK (1 << 3)
/* Set if TLB entry references a clean RAM page.  The iotlb entry will
   contain the page physical address. */
#define TLB_NOTDIRTY (1 << 4)
/* Set if TLB entry is an IO callback. */
#define TLB_MMIO (1 << 5)

/* Read/write guest memory for the debugger (no protection checks). */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write);

/* Per-client dirty bits stored in each phys_ram_dirty byte. */
#define VGA_DIRTY_FLAG 0x01
#define CODE_DIRTY_FLAG 0x02
#define KQEMU_DIRTY_FLAG 0x04
#define MIGRATION_DIRTY_FLAG 0x08

/* read dirty bit (return 0 or 1) */
880 | 04c504cc | bellard | static inline int cpu_physical_memory_is_dirty(ram_addr_t addr) |
881 | 1ccde1cb | bellard | { |
882 | 0a962c02 | bellard | return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff; |
883 | 0a962c02 | bellard | } |
884 | 0a962c02 | bellard | |
885 | 5fafdf24 | ths | static inline int cpu_physical_memory_get_dirty(ram_addr_t addr, |
886 | 0a962c02 | bellard | int dirty_flags)
|
887 | 0a962c02 | bellard | { |
888 | 0a962c02 | bellard | return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
|
889 | 1ccde1cb | bellard | } |
890 | 1ccde1cb | bellard | |
891 | 04c504cc | bellard | static inline void cpu_physical_memory_set_dirty(ram_addr_t addr) |
892 | 1ccde1cb | bellard | { |
893 | 0a962c02 | bellard | phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
|
894 | 1ccde1cb | bellard | } |
895 | 1ccde1cb | bellard | |
/* Clear the given dirty bits for pages in [start, end) and refresh
   any TLB state that depends on them. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags);
void cpu_tlb_update_dirty(CPUState *env);

/* Enable/disable and query global dirty-memory tracking. */
int cpu_physical_memory_set_dirty_tracking(int enable);

int cpu_physical_memory_get_dirty_tracking(void);

/* Synchronize the dirty bitmap for the given physical range. */
void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr);

/* Dump translation/execution statistics via the callback. */
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...));

/* Coalesced MMIO regions are areas where write operations can be reordered.
 * This usually implies that write operations are side-effect free. This allows
 * batching which can make a major impact on performance when using
 * virtualization.
 */
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);

/*******************************************/
/* host CPU ticks (if available) */
921 | e58ffeb3 | malc | #if defined(_ARCH_PPC)
|
922 | effedbc9 | bellard | |
/* Read the PowerPC timebase as a 64-bit value. */
static inline int64_t cpu_get_real_ticks(void)
{
    int64_t retval;
#ifdef _ARCH_PPC64
    /* This reads timebase in one 64bit go and includes Cell workaround from:
       http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html
    */
    __asm__ __volatile__ (
        "mftb %0\n\t"
        "cmpwi %0,0\n\t"
        "beq- $-8"
        : "=r" (retval));
#else
    /* 32-bit: read upper/lower halves and retry if the upper half
       changed between reads (classic timebase read loop), per:
       http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */
    unsigned long junk;
    __asm__ __volatile__ (
        "mftbu %1\n\t"
        "mftb %L0\n\t"
        "mftbu %0\n\t"
        "cmpw %0,%1\n\t"
        "bne $-16"
        : "=r" (retval), "=r" (junk));
#endif
    return retval;
}
948 | effedbc9 | bellard | |
949 | effedbc9 | bellard | #elif defined(__i386__)
|
950 | effedbc9 | bellard | |
/* Read the host TSC on 32-bit x86. */
static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    /* "=A" places the 64-bit result in the edx:eax register pair. */
    asm volatile ("rdtsc" : "=A" (val));
    return val;
}
957 | 5f1ce948 | bellard | |
958 | effedbc9 | bellard | #elif defined(__x86_64__)
|
959 | effedbc9 | bellard | |
960 | effedbc9 | bellard | static inline int64_t cpu_get_real_ticks(void) |
961 | effedbc9 | bellard | { |
962 | effedbc9 | bellard | uint32_t low,high; |
963 | effedbc9 | bellard | int64_t val; |
964 | effedbc9 | bellard | asm volatile("rdtsc" : "=a" (low), "=d" (high)); |
965 | effedbc9 | bellard | val = high; |
966 | effedbc9 | bellard | val <<= 32;
|
967 | effedbc9 | bellard | val |= low; |
968 | effedbc9 | bellard | return val;
|
969 | effedbc9 | bellard | } |
970 | effedbc9 | bellard | |
971 | f54b3f92 | aurel32 | #elif defined(__hppa__)
|
972 | f54b3f92 | aurel32 | |
/* Read the PA-RISC interval timer (control register cr16).
   NOTE(review): the result is held in a 32-bit int, so only the low
   32 bits of the counter are returned — confirm this suffices for
   the profiling uses of cpu_get_real_ticks(). */
static inline int64_t cpu_get_real_ticks(void)
{
    int val;
    asm volatile ("mfctl %%cr16, %0" : "=r"(val));
    return val;
}
979 | f54b3f92 | aurel32 | |
980 | effedbc9 | bellard | #elif defined(__ia64)
|
981 | effedbc9 | bellard | |
/* Read the Itanium interval time counter (application register ar.itc). */
static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
    return val;
}
988 | effedbc9 | bellard | |
989 | effedbc9 | bellard | #elif defined(__s390__)
|
990 | effedbc9 | bellard | |
/* Read the s390 TOD clock via STORE CLOCK (stck), which writes the
   64-bit clock value to memory (here, directly into val). */
static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
    return val;
}
997 | effedbc9 | bellard | |
998 | 3142255c | blueswir1 | #elif defined(__sparc_v8plus__) || defined(__sparc_v8plusa__) || defined(__sparc_v9__)
|
999 | effedbc9 | bellard | |
/* Read the SPARC %tick cycle counter. */
static inline int64_t cpu_get_real_ticks (void)
{
#if defined(_LP64)
    uint64_t rval;
    asm volatile("rd %%tick,%0" : "=r"(rval));
    return rval;
#else
    /* 32-bit ABI: %tick is 64 bits wide, so read it into one register
       and split it into two 32-bit halves via srlx. */
    union {
        uint64_t i64;
        struct {
            uint32_t high;
            uint32_t low;
        } i32;
    } rval;
    asm volatile("rd %%tick,%1; srlx %1,32,%0"
                 : "=r"(rval.i32.high), "=r"(rval.i32.low));
    return rval.i64;
#endif
}
1019 | c4b89d18 | ths | |
1020 | c4b89d18 | ths | #elif defined(__mips__)
|
1021 | c4b89d18 | ths | |
1022 | c4b89d18 | ths | static inline int64_t cpu_get_real_ticks(void) |
1023 | c4b89d18 | ths | { |
1024 | c4b89d18 | ths | #if __mips_isa_rev >= 2 |
1025 | c4b89d18 | ths | uint32_t count; |
1026 | c4b89d18 | ths | static uint32_t cyc_per_count = 0; |
1027 | c4b89d18 | ths | |
1028 | c4b89d18 | ths | if (!cyc_per_count)
|
1029 | c4b89d18 | ths | __asm__ __volatile__("rdhwr %0, $3" : "=r" (cyc_per_count)); |
1030 | c4b89d18 | ths | |
1031 | c4b89d18 | ths | __asm__ __volatile__("rdhwr %1, $2" : "=r" (count)); |
1032 | c4b89d18 | ths | return (int64_t)(count * cyc_per_count);
|
1033 | c4b89d18 | ths | #else
|
1034 | c4b89d18 | ths | /* FIXME */
|
1035 | c4b89d18 | ths | static int64_t ticks = 0; |
1036 | c4b89d18 | ths | return ticks++;
|
1037 | c4b89d18 | ths | #endif
|
1038 | c4b89d18 | ths | } |
1039 | c4b89d18 | ths | |
1040 | 46152182 | pbrook | #else
|
1041 | 46152182 | pbrook | /* The host CPU doesn't have an easily accessible cycle counter.
|
1042 | 85028e4d | ths | Just return a monotonically increasing value. This will be
|
1043 | 85028e4d | ths | totally wrong, but hopefully better than nothing. */
|
1044 | 46152182 | pbrook | static inline int64_t cpu_get_real_ticks (void) |
1045 | 46152182 | pbrook | { |
1046 | 46152182 | pbrook | static int64_t ticks = 0; |
1047 | 46152182 | pbrook | return ticks++;
|
1048 | 46152182 | pbrook | } |
1049 | effedbc9 | bellard | #endif
|
1050 | effedbc9 | bellard | |
1051 | effedbc9 | bellard | /* profiling */
|
1052 | effedbc9 | bellard | #ifdef CONFIG_PROFILER
|
1053 | effedbc9 | bellard | static inline int64_t profile_getclock(void) |
1054 | effedbc9 | bellard | { |
1055 | effedbc9 | bellard | return cpu_get_real_ticks();
|
1056 | effedbc9 | bellard | } |
1057 | effedbc9 | bellard | |
1058 | 5f1ce948 | bellard | extern int64_t kqemu_time, kqemu_time_start;
|
1059 | 5f1ce948 | bellard | extern int64_t qemu_time, qemu_time_start;
|
1060 | 5f1ce948 | bellard | extern int64_t tlb_flush_time;
|
1061 | 5f1ce948 | bellard | extern int64_t kqemu_exec_count;
|
1062 | 5f1ce948 | bellard | extern int64_t dev_time;
|
1063 | 5f1ce948 | bellard | extern int64_t kqemu_ret_int_count;
|
1064 | 5f1ce948 | bellard | extern int64_t kqemu_ret_excp_count;
|
1065 | 5f1ce948 | bellard | extern int64_t kqemu_ret_intr_count;
|
1066 | 5f1ce948 | bellard | #endif
|
1067 | 5f1ce948 | bellard | |
1068 | 5a9fdfec | bellard | #endif /* CPU_ALL_H */ |