root / include / exec / cpu-all.h @ 4917cf44
History | View | Annotate | Download (15 kB)
1 | 5a9fdfec | bellard | /*
|
---|---|---|---|
2 | 5a9fdfec | bellard | * defines common to all virtual CPUs
|
3 | 5fafdf24 | ths | *
|
4 | 5a9fdfec | bellard | * Copyright (c) 2003 Fabrice Bellard
|
5 | 5a9fdfec | bellard | *
|
6 | 5a9fdfec | bellard | * This library is free software; you can redistribute it and/or
|
7 | 5a9fdfec | bellard | * modify it under the terms of the GNU Lesser General Public
|
8 | 5a9fdfec | bellard | * License as published by the Free Software Foundation; either
|
9 | 5a9fdfec | bellard | * version 2 of the License, or (at your option) any later version.
|
10 | 5a9fdfec | bellard | *
|
11 | 5a9fdfec | bellard | * This library is distributed in the hope that it will be useful,
|
12 | 5a9fdfec | bellard | * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 | 5a9fdfec | bellard | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 | 5a9fdfec | bellard | * Lesser General Public License for more details.
|
15 | 5a9fdfec | bellard | *
|
16 | 5a9fdfec | bellard | * You should have received a copy of the GNU Lesser General Public
|
17 | 8167ee88 | Blue Swirl | * License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
18 | 5a9fdfec | bellard | */
|
19 | 5a9fdfec | bellard | #ifndef CPU_ALL_H
|
20 | 5a9fdfec | bellard | #define CPU_ALL_H
|
21 | 5a9fdfec | bellard | |
22 | 7d99a001 | blueswir1 | #include "qemu-common.h" |
23 | 022c62cb | Paolo Bonzini | #include "exec/cpu-common.h" |
24 | b2a8658e | Umesh Deshpande | #include "qemu/thread.h" |
25 | 0ac4bd56 | bellard | |
26 | 5fafdf24 | ths | /* some important defines:
|
27 | 5fafdf24 | ths | *
|
28 | 0ac4bd56 | bellard | * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
|
29 | 0ac4bd56 | bellard | * memory accesses.
|
30 | 5fafdf24 | ths | *
|
31 | e2542fe2 | Juan Quintela | * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and
|
32 | 0ac4bd56 | bellard | * otherwise little endian.
|
33 | 5fafdf24 | ths | *
|
34 | 0ac4bd56 | bellard | * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
|
35 | 5fafdf24 | ths | *
|
36 | 0ac4bd56 | bellard | * TARGET_WORDS_BIGENDIAN : same for target cpu
|
37 | 0ac4bd56 | bellard | */
|
38 | 0ac4bd56 | bellard | |
/* Byte swapping is required exactly when one of host and target is big
 * endian and the other is not; in that case the tswap*() helpers reverse
 * byte order, otherwise they are identity operations. */
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif

#ifdef BSWAP_NEEDED

/* Convert a 16-bit value between host and target byte order. */
static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

/* Convert a 32-bit value between host and target byte order. */
static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

/* Convert a 64-bit value between host and target byte order. */
static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

/* In-place variants of the conversions above. */
static inline void tswap16s(uint16_t *s)
{
    *s = tswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = tswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = tswap64(*s);
}

#else

/* Host and target share byte order: every conversion is a no-op. */
static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
    (void)s;
}

static inline void tswap32s(uint32_t *s)
{
    (void)s;
}

static inline void tswap64s(uint64_t *s)
{
    (void)s;
}

#endif
|
105 | f193c797 | bellard | |
/* target_ulong-sized swap helpers: select the 32- or 64-bit flavour to
 * match TARGET_LONG_SIZE.  bswaptls() always swaps, tswapl()/tswapls()
 * swap only when host and target endianness differ. */
#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
|
115 | f193c797 | bellard | |
116 | 61382a50 | bellard | /* CPU memory access without any memory or io remapping */
|
117 | 61382a50 | bellard | |
118 | 83d73968 | bellard | /*
|
119 | 83d73968 | bellard | * the generic syntax for the memory accesses is:
|
120 | 83d73968 | bellard | *
|
121 | 83d73968 | bellard | * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
|
122 | 83d73968 | bellard | *
|
123 | 83d73968 | bellard | * store: st{type}{size}{endian}_{access_type}(ptr, val)
|
124 | 83d73968 | bellard | *
|
125 | 83d73968 | bellard | * type is:
|
126 | 83d73968 | bellard | * (empty): integer access
|
127 | 83d73968 | bellard | * f : float access
|
128 | 5fafdf24 | ths | *
|
129 | 83d73968 | bellard | * sign is:
|
130 | 83d73968 | bellard | * (empty): for floats or 32 bit size
|
131 | 83d73968 | bellard | * u : unsigned
|
132 | 83d73968 | bellard | * s : signed
|
133 | 83d73968 | bellard | *
|
134 | 83d73968 | bellard | * size is:
|
135 | 83d73968 | bellard | * b: 8 bits
|
136 | 83d73968 | bellard | * w: 16 bits
|
137 | 83d73968 | bellard | * l: 32 bits
|
138 | 83d73968 | bellard | * q: 64 bits
|
139 | 5fafdf24 | ths | *
|
140 | 83d73968 | bellard | * endian is:
|
141 | 83d73968 | bellard | * (empty): target cpu endianness or 8 bit access
|
142 | 83d73968 | bellard | * r : reversed target cpu endianness (not implemented yet)
|
143 | 83d73968 | bellard | * be : big endian (not implemented yet)
|
144 | 83d73968 | bellard | * le : little endian (not implemented yet)
|
145 | 83d73968 | bellard | *
|
146 | 83d73968 | bellard | * access_type is:
|
147 | 83d73968 | bellard | * raw : host memory access
|
148 | 83d73968 | bellard | * user : user mode access using soft MMU
|
149 | 83d73968 | bellard | * kernel : kernel mode access using soft MMU
|
150 | 83d73968 | bellard | */
|
151 | 2df3b95d | bellard | |
/* target-endianness CPU memory access functions: map the endian-neutral
 * ld*_p/st*_p names onto the explicit big- or little-endian host-pointer
 * accessors, chosen by the target's byte order. */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define ldfl_p(p) ldfl_be_p(p)
#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define ldfl_p(p) ldfl_le_p(p)
#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#endif
|
178 | 5a9fdfec | bellard | |
/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
#include <assert.h>
#include "exec/user/abitypes.h"

/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
 */
#if defined(CONFIG_USE_GUEST_BASE)
extern unsigned long guest_base;
extern int have_guest_base;
extern unsigned long reserved_va;
#define GUEST_BASE guest_base
#define RESERVED_VA reserved_va
#else
/* No configurable base: guest and host addresses coincide. */
#define GUEST_BASE 0ul
#define RESERVED_VA 0ul
#endif

/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
/* g2h: translate a guest virtual address to a host pointer by adding
 * GUEST_BASE (the target_ulong cast truncates to guest address width). */
#define g2h(x) ((void *)((unsigned long)(target_ulong)(x) + GUEST_BASE))

#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
/* Host addresses cannot exceed the guest VA space, so any host address
 * maps back to a valid guest one. */
#define h2g_valid(x) 1
#else
/* A host address is reverse-mappable iff the offset from GUEST_BASE fits
 * in the target VA space and, when a region was reserved, lies inside it.
 * (GCC/Clang statement expression.) */
#define h2g_valid(x) ({ \
    unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
    (__guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS)) && \
    (!RESERVED_VA || (__guest < RESERVED_VA)); \
})
#endif

/* h2g: translate a host pointer back to a guest address; asserts that the
 * result fits the target address space. */
#define h2g(x) ({ \
    unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
    /* Check if given address fits target address space */ \
    assert(h2g_valid(x)); \
    (abi_ulong)__ret; \
})

/* Store/load address translation used by the *_raw accessors below. */
#define saddr(x) g2h(x)
#define laddr(x) g2h(x)

#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define saddr(x) (uint8_t *)(intptr_t)(x)
#define laddr(x) (uint8_t *)(intptr_t)(x)
#endif
|
228 | 53a5960a | pbrook | |
/* "raw" host-memory accessors: apply the laddr()/saddr() address
 * translation (g2h in user mode, a plain cast otherwise) and then use the
 * target-endianness pointer accessors defined above. */
#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)
|
243 | c27004ec | bellard | |
244 | c27004ec | bellard | |
#if defined(CONFIG_USER_ONLY)

/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)

/* Code-fetch accessors; the env1 argument is unused in user mode. */
#define cpu_ldub_code(env1, p) ldub_raw(p)
#define cpu_ldsb_code(env1, p) ldsb_raw(p)
#define cpu_lduw_code(env1, p) lduw_raw(p)
#define cpu_ldsw_code(env1, p) ldsw_raw(p)
#define cpu_ldl_code(env1, p) ldl_raw(p)
#define cpu_ldq_code(env1, p) ldq_raw(p)

/* Data accessors; env is unused in user mode.  NOTE(review): there is no
 * cpu_ldsb_data counterpart here — confirm no caller needs it. */
#define cpu_ldub_data(env, addr) ldub_raw(addr)
#define cpu_lduw_data(env, addr) lduw_raw(addr)
#define cpu_ldsw_data(env, addr) ldsw_raw(addr)
#define cpu_ldl_data(env, addr) ldl_raw(addr)
#define cpu_ldq_data(env, addr) ldq_raw(addr)

#define cpu_stb_data(env, addr, data) stb_raw(addr, data)
#define cpu_stw_data(env, addr, data) stw_raw(addr, data)
#define cpu_stl_data(env, addr, data) stl_raw(addr, data)
#define cpu_stq_data(env, addr, data) stq_raw(addr, data)

/* "kernel" accessors collapse to raw accesses: user mode has no soft MMU. */
#define cpu_ldub_kernel(env, addr) ldub_raw(addr)
#define cpu_lduw_kernel(env, addr) lduw_raw(addr)
#define cpu_ldsw_kernel(env, addr) ldsw_raw(addr)
#define cpu_ldl_kernel(env, addr) ldl_raw(addr)
#define cpu_ldq_kernel(env, addr) ldq_raw(addr)

#define cpu_stb_kernel(env, addr, data) stb_raw(addr, data)
#define cpu_stw_kernel(env, addr, data) stw_raw(addr, data)
#define cpu_stl_kernel(env, addr, data) stl_raw(addr, data)
#define cpu_stq_kernel(env, addr, data) stq_raw(addr, data)
291 | 61382a50 | bellard | |
/* Legacy kernel-mode accessors (no env argument): in user-mode emulation
 * there is no soft MMU, so these all collapse to the raw host accessors. */
#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldq_kernel(p) ldq_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
/* Fixed: the second parameter was misspelled "vt" while the body used "v",
 * so any expansion of stfq_kernel() referenced an undeclared identifier. */
#define stfq_kernel(p, v) stfq_raw(p, v)
306 | 61382a50 | bellard | |
/* NOTE(review): these repeat, token for token, the cpu_*_data definitions
 * earlier in this same CONFIG_USER_ONLY section.  Identical redefinition is
 * legal C, so this is benign, but the duplicates are candidates for removal. */
#define cpu_ldub_data(env, addr) ldub_raw(addr)
#define cpu_lduw_data(env, addr) lduw_raw(addr)
#define cpu_ldl_data(env, addr) ldl_raw(addr)

#define cpu_stb_data(env, addr, data) stb_raw(addr, data)
#define cpu_stw_data(env, addr, data) stw_raw(addr, data)
#define cpu_stl_data(env, addr, data) stl_raw(addr, data)
#endif /* defined(CONFIG_USER_ONLY) */
315 | 61382a50 | bellard | |
/* page related stuff */

/* Guest page geometry, derived from the per-target TARGET_PAGE_BITS. */
#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
/* Round addr up to the next guest page boundary. */
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)

/* ??? These should be the larger of uintptr_t and target_ulong. */
extern uintptr_t qemu_real_host_page_size;
extern uintptr_t qemu_host_page_size;
extern uintptr_t qemu_host_page_mask;

/* Round addr up to the next host page boundary. */
#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
328 | 5a9fdfec | bellard | |
/* Per-page protection/state flags; the low three match PROT_xxx. */
#define PAGE_READ 0x0001
#define PAGE_WRITE 0x0002
#define PAGE_EXEC 0x0004
#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID 0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
/* FIXME: Code that sets/uses this is broken and needs to go away. */
#define PAGE_RESERVED 0x0020
#endif
|
342 | 5a9fdfec | bellard | |
#if defined(CONFIG_USER_ONLY)
/* Dump the guest page mappings to f (debug aid). */
void page_dump(FILE *f);

/* Callback invoked per contiguous region: (opaque, start, end, prot). */
typedef int (*walk_memory_regions_fn)(void *, abi_ulong,
                                      abi_ulong, unsigned long);
int walk_memory_regions(void *, walk_memory_regions_fn);

/* Query/update the PAGE_* flags of guest pages. */
int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
int page_check_range(target_ulong start, target_ulong len, int flags);
#endif

/* Duplicate a CPU state (used e.g. for fork in user mode — confirm callers). */
CPUArchState *cpu_copy(CPUArchState *env);

/* Abort emulation with a printf-style message; never returns. */
void QEMU_NORETURN cpu_abort(CPUArchState *env, const char *fmt, ...)
    GCC_FMT_ATTR(2, 3);
/* Head of the global CPU list. */
extern CPUArchState *first_cpu;
|
360 | db1a4972 | Paolo Bonzini | |
/* Flags for use in ENV->INTERRUPT_PENDING.

   The numbers assigned here are non-sequential in order to preserve
   binary compatibility with the vmstate dump.  Bit 0 (0x0001) was
   previously used for CPU_INTERRUPT_EXIT, and is cleared when loading
   the vmstate dump. */

/* External hardware interrupt pending.  This is typically used for
   interrupts from devices. */
#define CPU_INTERRUPT_HARD 0x0002

/* Exit the current TB.  This is typically used when some system-level device
   makes some change to the memory mapping.  E.g. the a20 line change. */
#define CPU_INTERRUPT_EXITTB 0x0004

/* Halt the CPU. */
#define CPU_INTERRUPT_HALT 0x0020

/* Debug event pending. */
#define CPU_INTERRUPT_DEBUG 0x0080

/* Several target-specific external hardware interrupts.  Each target/cpu.h
   should define proper names based on these defines. */
#define CPU_INTERRUPT_TGT_EXT_0 0x0008
#define CPU_INTERRUPT_TGT_EXT_1 0x0010
#define CPU_INTERRUPT_TGT_EXT_2 0x0040
#define CPU_INTERRUPT_TGT_EXT_3 0x0200
#define CPU_INTERRUPT_TGT_EXT_4 0x1000

/* Several target-specific internal interrupts.  These differ from the
   preceding target-specific interrupts in that they are intended to
   originate from within the cpu itself, typically in response to some
   instruction being executed.  These, therefore, are not masked while
   single-stepping within the debugger. */
#define CPU_INTERRUPT_TGT_INT_0 0x0100
#define CPU_INTERRUPT_TGT_INT_1 0x0400
#define CPU_INTERRUPT_TGT_INT_2 0x0800
#define CPU_INTERRUPT_TGT_INT_3 0x2000

/* First unused bit: 0x4000. */

/* The set of all bits that should be masked when single-stepping:
   all external interrupt sources, but none of the internal ones. */
#define CPU_INTERRUPT_SSTEP_MASK \
    (CPU_INTERRUPT_HARD          \
     | CPU_INTERRUPT_TGT_EXT_0   \
     | CPU_INTERRUPT_TGT_EXT_1   \
     | CPU_INTERRUPT_TGT_EXT_2   \
     | CPU_INTERRUPT_TGT_EXT_3   \
     | CPU_INTERRUPT_TGT_EXT_4)
410 | 98699967 | bellard | |
/* Breakpoint/watchpoint flags */
#define BP_MEM_READ 0x01
#define BP_MEM_WRITE 0x02
#define BP_MEM_ACCESS (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
#define BP_WATCHPOINT_HIT 0x08
#define BP_GDB 0x10
#define BP_CPU 0x20

/* Insert/remove breakpoints and watchpoints on a CPU.  The insert
 * functions return 0 on success and optionally hand back the created
 * object; the *_remove_all variants take a BP_* mask. */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUArchState *env, int mask);
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr,
                          target_ulong len, int flags);
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUArchState *env, int mask);

#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not use timers while single stepping */

/* Enable or disable single-step mode on a CPU. */
void cpu_single_step(CPUArchState *env, int enabled);
437 | 4c3a88a2 | bellard | |
#if !defined(CONFIG_USER_ONLY)

/* Return the physical page corresponding to a virtual one. Use it
   only for debugging because no protection checks are done. Return -1
   if no page found. */
hwaddr cpu_get_phys_page_debug(CPUArchState *env, target_ulong addr);

/* memory API */

extern int phys_ram_fd;
extern ram_addr_t ram_size;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC_MASK (1 << 0)

/* One contiguous chunk of guest RAM. */
typedef struct RAMBlock {
    struct MemoryRegion *mr;
    uint8_t *host;          /* host pointer to the block's backing memory */
    ram_addr_t offset;      /* position in the ram_addr_t space — TODO confirm */
    ram_addr_t length;
    uint32_t flags;         /* e.g. RAM_PREALLOC_MASK */
    char idstr[256];        /* identifying name of the block */
    /* Reads can take either the iothread or the ramlist lock.
     * Writes must take both locks.
     */
    QTAILQ_ENTRY(RAMBlock) next;
#if defined(__linux__) && !defined(TARGET_S390X)
    int fd;                 /* backing file descriptor, Linux only */
#endif
} RAMBlock;

/* Global registry of all RAMBlocks. */
typedef struct RAMList {
    QemuMutex mutex;
    /* Protected by the iothread lock. */
    uint8_t *phys_dirty;    /* dirty bitmap for RAM pages */
    RAMBlock *mru_block;    /* most-recently-used block, lookup cache */
    /* Protected by the ramlist lock. */
    QTAILQ_HEAD(, RAMBlock) blocks;
    uint32_t version;       /* bumped on list changes — TODO confirm semantics */
} RAMList;
extern RAMList ram_list;

extern const char *mem_path;
extern int mem_prealloc;

/* Flags stored in the low bits of the TLB virtual address. These are
   defined so that fast path ram access is all zeros. */
/* Zero if TLB entry is valid. */
#define TLB_INVALID_MASK (1 << 3)
/* Set if TLB entry references a clean RAM page. The iotlb entry will
   contain the page physical address. */
#define TLB_NOTDIRTY (1 << 4)
/* Set if TLB entry is an IO callback. */
#define TLB_MMIO (1 << 5)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
ram_addr_t last_ram_offset(void);
/* Acquire/release the RAMList mutex declared above. */
void qemu_mutex_lock_ramlist(void);
void qemu_mutex_unlock_ramlist(void);
#endif /* !CONFIG_USER_ONLY */

/* Read or write guest memory for debugger access; returns 0 on success
   (negative on failure — confirm against implementation). */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write);
502 | 5a9fdfec | bellard | #endif /* CPU_ALL_H */ |