cpu-all.h @ 7c637366

/*
 * defines common to all virtual CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

#include "qemu-common.h"
#include "qemu-tls.h"
#include "cpu-common.h"

/* some important defines:
 *
 * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
 * memory accesses.
 *
 * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and
 * otherwise little endian.
 *
 * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
 *
 * TARGET_WORDS_BIGENDIAN : same for target cpu
 */

#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif

#ifdef BSWAP_NEEDED

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif

#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
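
/*
 * Usage sketch (illustrative, not part of the original header): the tswap*
 * helpers convert a value between host and target byte order, and compile
 * to no-ops when both sides share the same endianness.
 *
 *     uint32_t guest_word = tswap32(host_word);   host <-> target order
 *     tswap16s(&val16);                           in-place 16-bit swap
 *     target_ulong r = tswapl(host_long);         TARGET_LONG_SIZE wide
 */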

/* CPU memory access without any memory or io remapping */

/*
 * the generic syntax for the memory accesses is:
 *
 * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
 *
 * store: st{type}{size}{endian}_{access_type}(ptr, val)
 *
 * type is:
 * (empty): integer access
 *   f    : float access
 *
 * sign is:
 * (empty): for floats or 32 bit size
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * endian is:
 * (empty): target cpu endianness or 8 bit access
 *   r    : reversed target cpu endianness (not implemented yet)
 *   be   : big endian (not implemented yet)
 *   le   : little endian (not implemented yet)
 *
 * access_type is:
 *   raw    : host memory access
 *   user   : user mode access using soft MMU
 *   kernel : kernel mode access using soft MMU
 */
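
/*
 * Name decoding examples (illustrative, not part of the original header),
 * applying the scheme above:
 *
 *     ldub_raw(p)      load unsigned byte from host memory
 *     ldsw_kernel(p)   load signed 16-bit word via kernel-mode soft MMU
 *     stfq_raw(p, v)   store 64-bit float to host memory
 */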

/* target-endianness CPU memory access functions */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define ldfl_p(p) ldfl_be_p(p)
#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define ldfl_p(p) ldfl_le_p(p)
#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#endif

/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
#include <assert.h>
#include "qemu-types.h"

/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
 */
#if defined(CONFIG_USE_GUEST_BASE)
extern unsigned long guest_base;
extern int have_guest_base;
extern unsigned long reserved_va;
#define GUEST_BASE guest_base
#define RESERVED_VA reserved_va
#else
#define GUEST_BASE 0ul
#define RESERVED_VA 0ul
#endif

/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
#define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))

#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
#define h2g_valid(x) 1
#else
#define h2g_valid(x) ({ \
    unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
    __guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS); \
})
#endif

#define h2g(x) ({ \
    unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
    /* Check if given address fits target address space */ \
    assert(h2g_valid(x)); \
    (abi_ulong)__ret; \
})

#define saddr(x) g2h(x)
#define laddr(x) g2h(x)

#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define saddr(x) (uint8_t *)(long)(x)
#define laddr(x) (uint8_t *)(long)(x)
#endif
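
/*
 * Translation sketch (illustrative, not part of the original header): in
 * user mode a guest virtual address becomes a host pointer by adding
 * GUEST_BASE, and h2g() asserts that the reverse mapping fits within the
 * target address space.
 *
 *     uint8_t *hp = g2h(guest_addr);    guest -> host pointer
 *     abi_ulong ga = h2g(hp);           host -> guest, assert-checked
 *     if (h2g_valid(hp)) { ... }        validity test without the assert
 */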

#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)


#if defined(CONFIG_USER_ONLY)

/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)

#define ldub_code(p) ldub_raw(p)
#define ldsb_code(p) ldsb_raw(p)
#define lduw_code(p) lduw_raw(p)
#define ldsw_code(p) ldsw_raw(p)
#define ldl_code(p) ldl_raw(p)
#define ldq_code(p) ldq_raw(p)

#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldq_kernel(p) ldq_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
#define stfq_kernel(p, v) stfq_raw(p, v)

#endif /* defined(CONFIG_USER_ONLY) */

/* page related stuff */

#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)

/* ??? These should be the larger of unsigned long and target_ulong. */
extern unsigned long qemu_real_host_page_size;
extern unsigned long qemu_host_page_size;
extern unsigned long qemu_host_page_mask;

#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
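
/*
 * Worked example (illustrative, not part of the original header): with
 * TARGET_PAGE_BITS == 12, TARGET_PAGE_SIZE is 0x1000, so
 * TARGET_PAGE_ALIGN(0x12345) rounds up to 0x13000, while
 * (0x12345 & TARGET_PAGE_MASK) rounds down to 0x12000.
 */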

/* same as PROT_xxx */
#define PAGE_READ      0x0001
#define PAGE_WRITE     0x0002
#define PAGE_EXEC      0x0004
#define PAGE_BITS      (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID     0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
/* FIXME: Code that sets/uses this is broken and needs to go away. */
#define PAGE_RESERVED  0x0020
#endif

#if defined(CONFIG_USER_ONLY)
void page_dump(FILE *f);

typedef int (*walk_memory_regions_fn)(void *, abi_ulong,
                                      abi_ulong, unsigned long);
int walk_memory_regions(void *, walk_memory_regions_fn);

int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
int page_check_range(target_ulong start, target_ulong len, int flags);
#endif
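
/*
 * Usage sketch (illustrative, not part of the original header): query a
 * guest page's protection in user mode and restore write access for a page
 * that was write-protected to track self-modifying code.
 *
 *     int prot = page_get_flags(addr);
 *     if (!(prot & PAGE_WRITE) && (prot & PAGE_WRITE_ORG)) {
 *         page_set_flags(start, end, prot | PAGE_WRITE);
 *     }
 */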

CPUState *cpu_copy(CPUState *env);
CPUState *qemu_get_cpu(int cpu);

#define CPU_DUMP_CODE 0x00010000

void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags);
void cpu_dump_statistics(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                         int flags);

void QEMU_NORETURN cpu_abort(CPUState *env, const char *fmt, ...)
    GCC_FMT_ATTR(2, 3);
extern CPUState *first_cpu;
DECLARE_TLS(CPUState *, cpu_single_env);
#define cpu_single_env tls_var(cpu_single_env)

/* Flags for use in ENV->INTERRUPT_PENDING.

   The numbers assigned here are non-sequential in order to preserve
   binary compatibility with the vmstate dump.  Bit 0 (0x0001) was
   previously used for CPU_INTERRUPT_EXIT, and is cleared when loading
   the vmstate dump. */

/* External hardware interrupt pending.  This is typically used for
   interrupts from devices. */
#define CPU_INTERRUPT_HARD 0x0002

/* Exit the current TB.  This is typically used when some system-level device
   makes some change to the memory mapping, e.g. an A20 line change. */
#define CPU_INTERRUPT_EXITTB 0x0004

/* Halt the CPU. */
#define CPU_INTERRUPT_HALT 0x0020

/* Debug event pending. */
#define CPU_INTERRUPT_DEBUG 0x0080

/* Several target-specific external hardware interrupts.  Each target/cpu.h
   should define proper names based on these defines. */
#define CPU_INTERRUPT_TGT_EXT_0 0x0008
#define CPU_INTERRUPT_TGT_EXT_1 0x0010
#define CPU_INTERRUPT_TGT_EXT_2 0x0040
#define CPU_INTERRUPT_TGT_EXT_3 0x0200
#define CPU_INTERRUPT_TGT_EXT_4 0x1000

/* Several target-specific internal interrupts.  These differ from the
   preceding target-specific interrupts in that they are intended to
   originate from within the cpu itself, typically in response to some
   instruction being executed.  These, therefore, are not masked while
   single-stepping within the debugger. */
#define CPU_INTERRUPT_TGT_INT_0 0x0100
#define CPU_INTERRUPT_TGT_INT_1 0x0400
#define CPU_INTERRUPT_TGT_INT_2 0x0800

/* First unused bit: 0x2000. */

/* The set of all bits that should be masked when single-stepping. */
#define CPU_INTERRUPT_SSTEP_MASK \
    (CPU_INTERRUPT_HARD          \
     | CPU_INTERRUPT_TGT_EXT_0   \
     | CPU_INTERRUPT_TGT_EXT_1   \
     | CPU_INTERRUPT_TGT_EXT_2   \
     | CPU_INTERRUPT_TGT_EXT_3   \
     | CPU_INTERRUPT_TGT_EXT_4)

#ifndef CONFIG_USER_ONLY
typedef void (*CPUInterruptHandler)(CPUState *, int);

extern CPUInterruptHandler cpu_interrupt_handler;

static inline void cpu_interrupt(CPUState *s, int mask)
{
    cpu_interrupt_handler(s, mask);
}
#else /* USER_ONLY */
void cpu_interrupt(CPUState *env, int mask);
#endif /* USER_ONLY */
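
/*
 * Usage sketch (illustrative, not part of the original header): a device
 * model asserts an external interrupt line and later deasserts it.
 *
 *     cpu_interrupt(env, CPU_INTERRUPT_HARD);          raise the request
 *     cpu_reset_interrupt(env, CPU_INTERRUPT_HARD);    clear it again
 */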

void cpu_reset_interrupt(CPUState *env, int mask);

void cpu_exit(CPUState *s);

bool qemu_cpu_has_work(CPUState *env);

/* Breakpoint/watchpoint flags */
#define BP_MEM_READ           0x01
#define BP_MEM_WRITE          0x02
#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
#define BP_WATCHPOINT_HIT     0x08
#define BP_GDB                0x10
#define BP_CPU                0x20

int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUState *env, int mask);
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *env, target_ulong addr,
                          target_ulong len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *env, int mask);
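
/*
 * Usage sketch (illustrative, not part of the original header): plant a
 * GDB-owned breakpoint at pc, then drop every GDB breakpoint in one call.
 *
 *     CPUBreakpoint *bp;
 *     if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) == 0) {
 *         ...bp stays live until removed...
 *     }
 *     cpu_breakpoint_remove_all(env, BP_GDB);
 */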

#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not use timers while single stepping */

void cpu_single_step(CPUState *env, int enabled);
void cpu_reset(CPUState *s);
int cpu_is_stopped(CPUState *env);
void run_on_cpu(CPUState *env, void (*func)(void *data), void *data);

#define CPU_LOG_TB_OUT_ASM (1 << 0)
#define CPU_LOG_TB_IN_ASM  (1 << 1)
#define CPU_LOG_TB_OP      (1 << 2)
#define CPU_LOG_TB_OP_OPT  (1 << 3)
#define CPU_LOG_INT        (1 << 4)
#define CPU_LOG_EXEC       (1 << 5)
#define CPU_LOG_PCALL      (1 << 6)
#define CPU_LOG_IOPORT     (1 << 7)
#define CPU_LOG_TB_CPU     (1 << 8)
#define CPU_LOG_RESET      (1 << 9)

/* define log items */
typedef struct CPULogItem {
    int mask;
    const char *name;
    const char *help;
} CPULogItem;

extern const CPULogItem cpu_log_items[];

void cpu_set_log(int log_flags);
void cpu_set_log_filename(const char *filename);
int cpu_str_to_log_mask(const char *str);
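
/*
 * Usage sketch (illustrative, not part of the original header; the item
 * names "exec" and "int" are assumed to match entries in cpu_log_items,
 * which is defined elsewhere): enable logging of executed TBs and
 * interrupts, routed to a file.
 *
 *     int mask = cpu_str_to_log_mask("exec,int");
 *     cpu_set_log_filename("/tmp/qemu.log");
 *     cpu_set_log(mask);    mask == CPU_LOG_EXEC | CPU_LOG_INT
 */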

#if !defined(CONFIG_USER_ONLY)

/* Return the physical page corresponding to a virtual one.  Use it
   only for debugging because no protection checks are done.  Return -1
   if no page found. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr);

/* memory API */

extern int phys_ram_fd;
extern ram_addr_t ram_size;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC_MASK (1 << 0)

typedef struct RAMBlock {
    struct MemoryRegion *mr;
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    uint32_t flags;
    char idstr[256];
    QLIST_ENTRY(RAMBlock) next;
#if defined(__linux__) && !defined(TARGET_S390X)
    int fd;
#endif
} RAMBlock;

typedef struct RAMList {
    uint8_t *phys_dirty;
    QLIST_HEAD(, RAMBlock) blocks;
} RAMList;
extern RAMList ram_list;

extern const char *mem_path;
extern int mem_prealloc;

/* physical memory access */

/* MMIO pages are identified by a combination of an IO device index and
   3 flags.  The ROMD code stores the page ram offset in the iotlb entry,
   so only a limited number of ids are available. */

#define IO_MEM_NB_ENTRIES (1 << (TARGET_PAGE_BITS - IO_MEM_SHIFT))

/* Flags stored in the low bits of the TLB virtual address.  These are
   defined so that fast path ram access is all zeros. */
/* Zero if TLB entry is valid. */
#define TLB_INVALID_MASK (1 << 3)
/* Set if TLB entry references a clean RAM page.  The iotlb entry will
   contain the page physical address. */
#define TLB_NOTDIRTY (1 << 4)
/* Set if TLB entry is an IO callback. */
#define TLB_MMIO (1 << 5)

#define VGA_DIRTY_FLAG       0x01
#define CODE_DIRTY_FLAG      0x02
#define MIGRATION_DIRTY_FLAG 0x08

/* read dirty bit (return 0 or 1) */
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}

static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS];
}

static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
                                                int dirty_flags)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
}

static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
}

static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
                                                      int dirty_flags)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
}

static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
                                                        int length,
                                                        int dirty_flags)
{
    int i, mask, len;
    uint8_t *p;

    len = length >> TARGET_PAGE_BITS;
    mask = ~dirty_flags;
    p = ram_list.phys_dirty + (start >> TARGET_PAGE_BITS);
    for (i = 0; i < len; i++) {
        p[i] &= mask;
    }
}
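
/*
 * Usage sketch (illustrative, not part of the original header): mark a
 * page dirty after a guest store, then test and clear the VGA dirty bit
 * the way a display refresh loop might.
 *
 *     cpu_physical_memory_set_dirty(ram_addr);
 *     if (cpu_physical_memory_get_dirty(ram_addr, VGA_DIRTY_FLAG)) {
 *         cpu_physical_memory_mask_dirty_range(ram_addr, TARGET_PAGE_SIZE,
 *                                              VGA_DIRTY_FLAG);
 *     }
 */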

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags);
void cpu_tlb_update_dirty(CPUState *env);

int cpu_physical_memory_set_dirty_tracking(int enable);

int cpu_physical_memory_get_dirty_tracking(void);

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
#endif /* !CONFIG_USER_ONLY */

int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write);

#endif /* CPU_ALL_H */