exec.c @ fd872598
/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>
#if !defined(CONFIG_SOFTMMU)
#include <sys/mman.h>
#endif

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

typedef struct PageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
    /* list of TBs intersecting this physical page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

static void io_mem_init(void);

unsigned long real_host_page_size;
unsigned long host_page_bits;
unsigned long host_page_size;
unsigned long host_page_mask;

static PageDesc *l1_map[L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
static VirtPageDesc *l1_virt_map[L1_SIZE];
static unsigned int virt_valid_tag;
#endif

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

static void page_init(void)
{
    /* NOTE: we can always suppose that host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    real_host_page_size = 4096;
#else
    real_host_page_size = getpagesize();
#endif
    if (host_page_size == 0)
        host_page_size = real_host_page_size;
    if (host_page_size < TARGET_PAGE_SIZE)
        host_page_size = TARGET_PAGE_SIZE;
    host_page_bits = 0;
    while ((1 << host_page_bits) < host_page_size)
        host_page_bits++;
    host_page_mask = ~(host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
}

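/* return the PageDesc for a target page index, allocating and zeroing
   the second-level array on first use */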
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, target_ulong addr);
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);

static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
{
    VirtPageDesc **lp, *p;

    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline VirtPageDesc *virt_page_find(unsigned int index)
{
    VirtPageDesc *p;

    p = l1_virt_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

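/* invalidate the whole virtual page map at once: entries are only
   considered valid while their 'valid_tag' matches 'virt_valid_tag',
   so incrementing the global tag flushes every entry. On tag
   wrap-around, all stored tags are cleared explicitly. */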
static void virt_page_flush(void)
{
    int i, j;
    VirtPageDesc *p;

    virt_valid_tag++;

    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                for(j = 0; j < L2_SIZE; j++)
                    p[j].valid_tag = 0;
            }
        }
    }
}
#else
static void virt_page_flush(void)
{
}
#endif

void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
    int i;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;
    virt_page_flush();

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++)
        tb_phys_hash[i] = NULL;
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

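/* TB lists use tagged pointers: the low 2 bits of each link select
   which slot (page_next[n] or jmp_next[n]) of the pointed-to TB
   continues the chain, and the tag value 2 marks the head of a
   circular jump list. Masking with ~3 recovers the real pointer. */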
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
}

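/* set 'len' bits starting at bit index 'start' in the bitmap 'tab'
   (used to build the per-page code bitmap) */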
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, (unsigned long)pc);
    tb = tb_alloc((unsigned long)pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc((unsigned long)pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
#if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
    CPUState *env = cpu_single_env;
#endif
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (cpu_single_env->cr[0] & CR0_PE_MASK) {
        printf("modifying code at 0x%x size=%d EIP=%x\n",
               (vaddr & TARGET_PAGE_MASK) | (start & ~TARGET_PAGE_MASK), len,
               cpu_single_env->eip);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

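/* invalidate all TBs on the physical page of 'addr'; 'pc' and 'puc'
   come from the signal handler so that, with precise SMC support, the
   interrupted TB can be restarted at the faulting instruction */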
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#ifdef TARGET_HAS_SMC

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & host_page_mask;
        host_end = host_start + host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
        if (vp->valid_tag == virt_valid_tag &&
            vp->phys_addr != tb->page_addr[0]) {
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                   addr, tb->page_addr[0], vp->phys_addr);
        }
#endif
        vp->phys_addr = tb->page_addr[0];
        if (vp->valid_tag != virt_valid_tag) {
            vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
            vp->prot = 0;
#endif
        }

        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
            if (vp->valid_tag == virt_valid_tag &&
                vp->phys_addr != tb->page_addr[1]) {
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                       addr, tb->page_addr[1], vp->phys_addr);
            }
#endif
            vp->phys_addr = tb->page_addr[1];
            if (vp->valid_tag != virt_valid_tag) {
                vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
                vp->prot = 0;
#endif
            }
        }
    }
#endif

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
found:
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        for(item = cpu_log_items; item->mask != 0; item++) {
            if (cmp1(p, p1 - p, item->name))
                goto found;
        }
        return 0;
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    virt_page_flush();
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
}

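/* invalidate a single TLB entry if it maps the page at 'addr' */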
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TB at this
       virtual address */

    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
}

static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, target_ulong addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}

static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       unsigned long phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
{
    int i;

    phys_addr &= TARGET_PAGE_MASK;
    phys_addr += (long)phys_ram_base;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}

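/* redirect a RAM write entry to the NOTDIRTY handler when its target
   falls in [start, start + length), so that the next write to the page
   goes through the slow path and sets the dirty bit again */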
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);

    env = cpu_single_env;
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PageDesc *p;
    unsigned long pd;
    TranslationBlock *first_tb;
    unsigned int index;
    target_ulong address;
    unsigned long addend;
    int ret;

    p = page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
        first_tb = NULL;
    } else {
        pd = p->phys_offset;
        first_tb = p->first_tb;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else
                /* XXX: the PowerPC code seems not ready to handle
                   self modifying code with DCBI */
#if defined(TARGET_HAS_SMC) || 1
            if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend;
            } else
#endif
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

1740 |
/* register physical memory. 'size' must be a multiple of the target
|
1741 |
page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
|
1742 |
io memory page */
|
1743 |
void cpu_register_physical_memory(target_phys_addr_t start_addr,
|
1744 |
unsigned long size, |
1745 |
unsigned long phys_offset) |
1746 |
{ |
1747 |
unsigned long addr, end_addr; |
1748 |
PageDesc *p; |
1749 |
|
1750 |
end_addr = start_addr + size; |
1751 |
for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) {
|
1752 |
p = page_find_alloc(addr >> TARGET_PAGE_BITS); |
1753 |
p->phys_offset = phys_offset; |
1754 |
if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
|
1755 |
phys_offset += TARGET_PAGE_SIZE; |
1756 |
} |
1757 |
} |
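
/* Illustrative sketch (editor's addition, not built): mapping page aligned
   'ram_size' bytes of guest RAM at guest physical address 0. A phys_offset
   whose low bits are zero designates plain RAM at that offset within
   phys_ram_base; 'ram_size' is hypothetical. */
#if 0
static void example_map_ram(unsigned long ram_size)
{
    cpu_register_physical_memory(0, ram_size, 0);
}
#endif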

/* reads from unassigned memory return 0 and writes to it are ignored */

static uint32_t unassigned_mem_readb(target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

/* self modifying code support in soft mmu mode: writes to a page
   containing code are routed to these functions */

static void code_mem_writeb(target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 1);
#endif
    stb_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writew(target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2);
#endif
    stw_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writel(target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4);
#endif
    stl_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};

static void notdirty_mem_writeb(target_phys_addr_t addr, uint32_t val)
{
    stb_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(target_phys_addr_t addr, uint32_t val)
{
    stw_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(target_phys_addr_t addr, uint32_t val)
{
    stl_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write);
    /* entries 0 to 4 are reserved (RAM, ROM, unassigned, code, notdirty) */
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write)
{
    int i;

    if (io_index <= 0) {
        /* check the table itself has room before allocating a new zone */
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    return io_index << IO_MEM_SHIFT;
}
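
/* Illustrative sketch (editor's addition, not built): allocating a new io
   zone for a device and mapping one page of it. 'my_mmio_read' and
   'my_mmio_write' stand for device callback tables and 'base' for a guest
   physical address; all three are hypothetical. */
#if 0
static void example_register_device(target_phys_addr_t base,
                                    CPUReadMemoryFunc **my_mmio_read,
                                    CPUWriteMemoryFunc **my_mmio_write)
{
    int io;

    io = cpu_register_io_memory(0, my_mmio_read, my_mmio_write);
    if (io != -1)
        cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}
#endif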

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* copy only the part that lies in the current page */
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_raw(buf);
                    io_mem_write[io_index][2](addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_raw(buf);
                    io_mem_write[io_index][1](addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_raw(buf);
                    io_mem_write[io_index][0](addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                /* set dirty bit (indexed by ram offset, not by guest
                   physical address) */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 1;
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](addr);
                    stl_raw(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](addr);
                    stw_raw(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](addr);
                    stb_raw(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#endif
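
/* Illustrative sketch (editor's addition, not built): thin wrappers showing
   the is_write convention of cpu_physical_memory_rw(). */
#if 0
static void example_physical_read(target_phys_addr_t addr,
                                  uint8_t *buf, int len)
{
    cpu_physical_memory_rw(addr, buf, len, 0);
}

static void example_physical_write(target_phys_addr_t addr,
                                   uint8_t *buf, int len)
{
    cpu_physical_memory_rw(addr, buf, len, 1);
}
#endif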

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
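
/* Illustrative sketch (editor's addition, not built): how a debug stub
   would read guest memory through the virtual address space of 'env';
   returns 0 on success, -1 if a page is unmapped. */
#if 0
static int example_debugger_peek(CPUState *env, target_ulong vaddr,
                                 uint8_t *out, int len)
{
    return cpu_memory_rw_debug(env, vaddr, out, len, 0);
}
#endif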

#if !defined(CONFIG_USER_ONLY)

/* instantiate the code access ("_cmmu") soft mmu helpers for 8, 16, 32
   and 64 bit accesses */
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif