/* exec.c @ 5dcb6b91 */
/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 * In the future, this is to be replaced by a multi-level table
 * to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
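
/* Illustrative note (not in the original source): for a target with 4 KB
   pages (TARGET_PAGE_BITS == 12) and L2_BITS == 10, a 32-bit address splits
   as L1_BITS = 32 - 10 - 12 = 10, i.e. 10 bits of L1 index, 10 bits of L2
   index and 12 bits of page offset, so l1_map and each second-level table
   hold 1024 entries. The page_find()/page_find_alloc() helpers below then
   resolve a page as:

       l1_map[page_index >> L2_BITS][page_index & (L2_SIZE - 1)]

   where page_index = address >> TARGET_PAGE_BITS. */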

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}
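
/* Illustrative note (not in the original source): page_init() leaves
   qemu_host_page_size as the larger of the real host page size and
   TARGET_PAGE_SIZE. For example, on a host with 4 KB pages emulating a
   target with 8 KB pages, qemu_real_host_page_size is 4096 while
   qemu_host_page_size becomes 8192, qemu_host_page_bits 13 and
   qemu_host_page_mask ~0x1fff. */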

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
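
/* Illustrative sketch (not in the original source): a typical caller maps a
   physical address to its PhysPageDesc and falls back to IO_MEM_UNASSIGNED
   when no descriptor exists, exactly as breakpoint_invalidate() and
   tlb_set_page_exec() do further down: */
#if 0
    PhysPageDesc *p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    unsigned long pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;
#endif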

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
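
/* Illustrative note (not in the original source): the jump lists above rely
   on TranslationBlock pointers being at least 4-byte aligned, so the low two
   bits of each list pointer can carry a tag. A tag of 0 or 1 names which of
   the pointed-to TB's jmp_next[] slots continues the list, while tag 2 marks
   the end of the circular list (tb_link_phys() below initializes jmp_first to
   the TB itself with tag 2). That is why every walker repeatedly computes

       n1  = (long)tb1 & 3;                            tag
       tb1 = (TranslationBlock *)((long)tb1 & ~3);     untagged pointer

   and stops when n1 == 2. */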

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
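
/* Illustrative example (not in the original source): set_bits(tab, 5, 7)
   marks bits 5..11, i.e. it ORs 0xe0 into tab[0] (bits 5-7) and 0x0f into
   tab[1] (bits 8-11). build_page_bitmap() below uses this to record which
   byte offsets of a page are covered by translated code. */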

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
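
/* Illustrative sketch (not in the original source): tb_find_pc() is what lets
   a host-side fault be attributed back to a translated block. A caller that
   holds the host PC at which generated code faulted can recover the TB and
   restore the guest CPU state, as tb_invalidate_phys_page_range() does with
   env->mem_write_pc (host_pc and puc below are hypothetical names): */
#if 0
    TranslationBlock *tb = tb_find_pc(host_pc);
    if (tb)
        cpu_restore_state(tb, env, host_pc, puc);
#endif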

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
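
/* Illustrative example (not in the original source): given the
   cpu_log_items[] table above, cpu_str_to_log_mask("in_asm,exec") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_EXEC, cpu_str_to_log_mask("all") returns the
   union of every listed mask, and an unknown name makes it return 0. */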

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init();
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
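
/* Illustrative note (not in the original source): the TLB is a direct-mapped
   cache indexed by the low bits of the page number. Assuming 4 KB target
   pages and a CPU_TLB_SIZE of 256, an access to 0x40012345 has page number
   0x40012 and lands in slot 0x12:

       i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

   which is the same computation tlb_flush_page() above and
   tlb_set_page_exec() below use to pick the entry to flush or fill. */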

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}
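
/* Illustrative note (not in the original source): dirty tracking works by
   downgrading TLB write entries rather than unmapping pages. When
   cpu_physical_memory_reset_dirty() clears dirty bits, tlb_reset_dirty_range()
   above retags the matching addr_write entries as IO_MEM_NOTDIRTY so the next
   store takes the slow path; once the page is dirty again, tlb_set_dirty1()
   switches the entry back to IO_MEM_RAM and stores become fast again. */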

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].is_ram = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].is_ram = 1;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[is_user][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

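/* Illustrative usage sketch (editor's addition, not part of the original
   file): dumping the current guest mappings to stderr.  Each output line
   follows the "start-end size prot" format produced by page_dump() above,
   e.g. "08048000-08049000 00001000 r-x". */
static void example_dump_mappings(void)
{
    page_dump(stderr);
}
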
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

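/* Illustrative sketch (editor's addition, not part of the original file):
   how an emulated mprotect() might write-protect a guest region.  Dropping
   PAGE_WRITE here means a later guest write faults and is recovered by
   page_unprotect() below; the flag names are the ones page_get_flags()
   reports. */
static void example_write_protect(target_ulong start, target_ulong len)
{
    page_set_flags(start, start + len, PAGE_VALID | PAGE_READ | PAGE_EXEC);
}
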
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

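/* Illustrative sketch (editor's addition, not part of the original file):
   page_unprotect() is intended to be called from the host SIGSEGV handler.
   A return value of 1 means the fault was caused by our own write
   protection (self-modifying code tracking) and the faulting write can be
   retried; 0 means it is a genuine guest fault. */
static int example_handle_write_fault(target_ulong guest_addr,
                                      unsigned long host_pc, void *sigcontext)
{
    if (page_unprotect(guest_addr, host_pc, sigcontext))
        return 1;   /* handled: resume and retry the write */
    return 0;       /* real fault: deliver a signal to the guest */
}
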
/* call this function when system calls directly modify a memory area */
/* ??? This should be redundant now we have lock_user. */
void page_unprotect_range(target_ulong data, target_ulong data_size)
{
    target_ulong start, end, addr;

    start = data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
            (phys_offset & IO_MEM_ROMD))
            phys_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

/* XXX: temporary until new memory mapping API */
uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(unsigned int size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
                size, phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}

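/* Illustrative sketch (editor's addition, not part of the original file):
   the usual pairing of qemu_ram_alloc() with cpu_register_physical_memory().
   RAM offsets are page aligned, so the memory-type bits can be or'ed in
   directly; plain RAM uses IO_MEM_RAM. */
static void example_register_ram(target_phys_addr_t base, unsigned long size)
{
    ram_addr_t ram_offset;

    ram_offset = qemu_ram_alloc(size);
    cpu_register_physical_memory(base, size, ram_offset | IO_MEM_RAM);
}
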
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_lx "\n", addr);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_lx " = 0x%x\n", addr, val);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}

/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in the is_ram case.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            if (env->watchpoint[i].is_ram)
                retaddr = addr - (unsigned long)phys_ram_base;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}

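/* Illustrative sketch (editor's addition, not part of the original file):
   registering a byte/word/long MMIO region for a hypothetical device and
   mapping it at a guest physical address.  The mydev_* callbacks and the
   0x1000 region size are assumptions; the value returned by
   cpu_register_io_memory() is exactly what cpu_register_physical_memory()
   expects in its phys_offset argument. */
static uint32_t mydev_readb(void *opaque, target_phys_addr_t addr) { return 0; }
static uint32_t mydev_readw(void *opaque, target_phys_addr_t addr) { return 0; }
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr) { return 0; }
static void mydev_writeb(void *opaque, target_phys_addr_t addr, uint32_t val) { }
static void mydev_writew(void *opaque, target_phys_addr_t addr, uint32_t val) { }
static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val) { }

static CPUReadMemoryFunc *mydev_read[3] = { mydev_readb, mydev_readw, mydev_readl };
static CPUWriteMemoryFunc *mydev_write[3] = { mydev_writeb, mydev_writew, mydev_writel };

static void example_map_mydev(target_phys_addr_t base, void *dev_state)
{
    int iomemtype;

    /* io_index 0 asks for a new io zone to be allocated */
    iomemtype = cpu_register_io_memory(0, mydev_read, mydev_write, dev_state);
    cpu_register_physical_memory(base, 0x1000, iomemtype);
}
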
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            p = lock_user(addr, len, 0);
            memcpy(p, buf, len);
            unlock_user(p, addr, len);
        } else {
            if (!(flags & PAGE_READ))
                return;
            p = lock_user(addr, len, 1);
            memcpy(buf, p, len);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

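/* Illustrative sketch (editor's addition, not part of the original file):
   callers normally reach this slow path through the
   cpu_physical_memory_read()/cpu_physical_memory_write() wrappers, the same
   thin wrappers around cpu_physical_memory_rw() that the ld*_phys and
   st*_phys helpers below use. */
static void example_copy_guest_ram(target_phys_addr_t dst,
                                   target_phys_addr_t src, int len)
{
    uint8_t tmp[64];

    while (len > 0) {
        int l = len > (int)sizeof(tmp) ? (int)sizeof(tmp) : len;
        cpu_physical_memory_read(src, tmp, l);
        cpu_physical_memory_write(dst, tmp, l);
        src += l;
        dst += l;
        len -= l;
    }
}
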
/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

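/* Illustrative sketch (editor's addition, not part of the original file):
   loading a firmware image into a region registered as ROM.  A plain
   cpu_physical_memory_write() to a ROM page goes through the I/O write
   handler and is discarded, so ROM loaders use
   cpu_physical_memory_write_rom() instead.  'bios_buf'/'bios_size' are
   assumed to come from the caller. */
static void example_load_rom(target_phys_addr_t rom_base,
                             const uint8_t *bios_buf, int bios_size)
{
    cpu_physical_memory_write_rom(rom_base, bios_buf, bios_size);
}
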
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

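/* Illustrative sketch (editor's addition, not part of the original file):
   a target MMU helper setting a status bit in a guest page-table entry.
   stl_phys_notdirty() is used precisely so that this internal update does
   not itself mark the page dirty or invalidate translated code.  The
   'pte_addr' argument and the 0x20 "accessed" bit are assumptions for the
   example, not any particular target's PTE layout. */
static void example_update_pte(target_phys_addr_t pte_addr, uint32_t pte)
{
    pte |= 0x20;                      /* hypothetical "accessed" bit */
    stl_phys_notdirty(pte_addr, pte);
}
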
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

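/* Illustrative sketch (editor's addition, not part of the original file):
   how a debugger stub might read guest virtual memory, e.g. to service a
   gdb memory-read request.  The -1 return value signals an unmapped
   virtual address. */
static int example_debug_read(CPUState *env, target_ulong vaddr,
                              uint8_t *buf, int len)
{
    if (cpu_memory_rw_debug(env, vaddr, buf, len, 0) < 0)
        return -1;                    /* no physical page mapped */
    return len;
}
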
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}

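/* Illustrative sketch (editor's addition, not part of the original file):
   dump_exec_info() takes an fprintf-like callback, so dumping the JIT
   statistics to stdout is simply a matter of passing fprintf itself. */
static void example_dump_jit_stats(void)
{
    dump_exec_info(stdout, fprintf);
}
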
#if !defined(CONFIG_USER_ONLY)

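/* Editor's note (descriptive comment, not in the original file): the
   template below is included once per access size; SHIFT selects an
   access of 1 << SHIFT bytes (0 = byte, 1 = word, 2 = long, 3 = quad).
   SOFTMMU_CODE_ACCESS together with MMUSUFFIX _cmmu makes these the
   code-fetch variants of the soft-MMU load helpers, and redefining env as
   cpu_single_env makes them operate on the currently executing CPU. */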
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif