/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
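
/*
 * Address decomposition for the two-level maps below: after dropping the
 * TARGET_PAGE_BITS page offset, the top L1_BITS of a 32-bit address pick
 * the L1 slot and the low L2_BITS pick the entry in the L2 array. For
 * example, assuming 4 KB target pages (TARGET_PAGE_BITS == 12), an
 * address splits as 10 L1 bits | 10 L2 bits | 12 offset bits, so a
 * lookup costs two array dereferences.
 */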

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

static void page_init(void)
{
    /* NOTE: we can always assume that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return NULL;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
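
/*
 * l1_phys_map is the guest-physical analogue of l1_map. When
 * TARGET_PHYS_ADDR_SPACE_BITS > 32 (SPARC64/PPC64 above),
 * phys_page_find_alloc() adds one more L1 level, so a physical page
 * index resolves in three steps instead of two; fresh entries start as
 * IO_MEM_UNASSIGNED rather than zero, unlike the memset-cleared l1_map.
 */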

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
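
/*
 * Note on the list encodings used by tb_page_remove() and
 * tb_jmp_remove(): the pointers stored in page_next[], jmp_next[] and
 * jmp_first are tagged, with the low two bits selecting which slot (0
 * or 1) of the pointed-to TB continues the chain. The tag value 2 marks
 * the list head (the owning TB itself), which is why every walker masks
 * with ~3 and stops when (ptr & 3) == 2.
 */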

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the jump cache of each CPU */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
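
/*
 * Worked example: set_bits(tab, 3, 7) marks bits 3..9, ORing 0xf8 into
 * tab[0] (bits 3-7) and 0x03 into tab[1] (bits 8-9). The first branch
 * handles the degenerate case where all bits fall within a single byte.
 */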

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
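
/*
 * The code bitmap holds one bit per byte of the target page
 * (TARGET_PAGE_SIZE / 8 bytes in total); a set bit means that byte is
 * covered by at least one translated block. It lets
 * tb_invalidate_phys_page_fast() below skip the invalidation path when
 * a guest write touches no translated code.
 */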

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   in range [start, end). NOTE: start and end must refer to the same
   physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do this to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remains, there is no need to keep using slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                   cpu_single_env->mem_write_vaddr, len,
                   cpu_single_env->eip,
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
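
/*
 * The single-byte bitmap test above never misses: because start is a
 * multiple of len and the callers use power-of-two access sizes (1, 2,
 * 4 or 8 bytes -- an assumption, as the write handlers are outside this
 * excerpt), the len bits for a write cannot straddle a bitmap byte
 * boundary, so one shift and mask inspects every byte the write touches.
 */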

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
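
/*
 * tb_find_pc() can exit the loop without an exact hit because tc_ptr
 * normally points into the middle of a block's generated code. Since
 * tbs[] is filled in order of strictly increasing tc_ptr, &tbs[m_max]
 * is the last block starting at or below tc_ptr, i.e. the block that
 * contains the address.
 */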

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong addr, pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o port accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
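
/*
 * Example: cpu_str_to_log_mask("in_asm,op") yields
 * CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP; "all" ORs in every entry of
 * cpu_log_items, and any unknown name makes the whole call return 0.
 */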

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init();
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
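
/*
 * Dirty-tracking round trip: clearing dirty bits here also downgrades
 * matching write TLB entries from IO_MEM_RAM to IO_MEM_NOTDIRTY, so the
 * next guest store to such a page takes the slow path, where the dirty
 * bits can be set again (and, for CODE_DIRTY_FLAG, self-modifying code
 * detected) before tlb_set_dirty() restores the fast RAM mapping.
 */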

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[is_user][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we behave as if code were inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
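
/*
 * The addend field turns the softmmu fast path into a single add: for a
 * RAM page, addend = (phys_ram_base + physical_page_base) - vaddr, so
 * host_address = guest_vaddr + addend for any offset inside the page.
 * For I/O pages, the low bits of the addr_* fields carry the io_index
 * instead, which is what the `vaddr | pd` encodings above store.
 */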
1588

    
1589
/* called from signal handler: invalidate the code and unprotect the
1590
   page. Return TRUE if the fault was succesfully handled. */
1591
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1592
{
1593
#if !defined(CONFIG_SOFTMMU)
1594
    VirtPageDesc *vp;
1595

    
1596
#if defined(DEBUG_TLB)
1597
    printf("page_unprotect: addr=0x%08x\n", addr);
1598
#endif
1599
    addr &= TARGET_PAGE_MASK;
1600

    
1601
    /* if it is not mapped, no need to worry here */
1602
    if (addr >= MMAP_AREA_END)
1603
        return 0;
1604
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1605
    if (!vp)
1606
        return 0;
1607
    /* NOTE: in this case, validate_tag is _not_ tested as it
1608
       validates only the code TLB */
1609
    if (vp->valid_tag != virt_valid_tag)
1610
        return 0;
1611
    if (!(vp->prot & PAGE_WRITE))
1612
        return 0;
1613
#if defined(DEBUG_TLB)
1614
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n", 
1615
           addr, vp->phys_addr, vp->prot);
1616
#endif
1617
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1618
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1619
                  (unsigned long)addr, vp->prot);
1620
    /* set the dirty bit */
1621
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1622
    /* flush the code inside */
1623
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1624
    return 1;
1625
#else
1626
    return 0;
1627
#endif
1628
}
1629

    
1630
#else
1631

    
1632
void tlb_flush(CPUState *env, int flush_global)
1633
{
1634
}
1635

    
1636
void tlb_flush_page(CPUState *env, target_ulong addr)
1637
{
1638
}
1639

    
1640
int tlb_set_page_exec(CPUState *env, target_ulong vaddr, 
1641
                      target_phys_addr_t paddr, int prot, 
1642
                      int is_user, int is_softmmu)
1643
{
1644
    return 0;
1645
}
1646

    
1647
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start, 
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) && 
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

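/* Illustrative use of the two functions above (not code from this
   file): a target mmap() emulation would record the new protections
   with page_set_flags() and query them later with page_get_flags():

       page_set_flags(start, start + len,
                      PAGE_VALID | PAGE_READ | PAGE_WRITE);
       ...
       if (page_get_flags(addr) & PAGE_WRITE) {
           ... target writes to addr are permitted ...
       }
*/
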
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size, 
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

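/* Rough sketch of the caller side (the actual host signal code lives
   elsewhere): the host SIGSEGV handler tries page_unprotect() first
   and only delivers the fault to the guest if it returns 0. Names
   below are illustrative:

       if (page_unprotect(guest_addr, pc, puc))
           return;        .. handled: retry the faulting instruction ..
       .. otherwise queue the signal for the guest ..
*/
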
/* call this function when system calls directly modify a memory area */
/* ??? This should be redundant now we have lock_user.  */
void page_unprotect_range(target_ulong data, target_ulong data_size)
{
    target_ulong start, end, addr;

    start = data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr, 
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
            (phys_offset & IO_MEM_ROMD))
            phys_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

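/* Typical board-level usage (illustrative, not from this file):

       cpu_register_physical_memory(0, ram_size,
                                    qemu_ram_alloc(ram_size) | IO_MEM_RAM);
       cpu_register_physical_memory(mmio_base, 0x1000, my_io_index);

   where my_io_index is a hypothetical value previously returned by
   cpu_register_io_memory(). */
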
/* XXX: temporary until new memory mapping API */
uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(unsigned int size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n", 
                size, phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}

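/* Note: qemu_ram_alloc() is a simple bump allocator over the
   preallocated phys_ram_base area, so individual allocations cannot
   be returned; qemu_ram_free() is deliberately a no-op. */
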
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read  0x%08x\n", (int)addr);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

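/* The notdirty handlers below are installed in the TLB for RAM pages
   whose dirty flags are not all set (typically pages that still
   contain translated code). A store through them first invalidates any
   TBs on the page, then performs the write, updates the dirty flags
   and, once the page is fully dirty, re-enables the fast RAM path. */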
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is a positive
   value, the corresponding io zone is modified. If it is zero or
   negative, a new io zone is allocated. The return value can be used
   with cpu_register_physical_memory(). (-1) is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}

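/* Typical device-side usage (illustrative; the my_* names are
   hypothetical handlers and state, one handler per access size):

       static CPUReadMemoryFunc *my_read[3] = {
           my_readb, my_readw, my_readl,
       };
       static CPUWriteMemoryFunc *my_write[3] = {
           my_writeb, my_writew, my_writel,
       };

       io = cpu_register_io_memory(0, my_read, my_write, my_state);
       cpu_register_physical_memory(mmio_base, 0x1000, io);
*/
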
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, 
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* copy at most up to the end of the current page */
            p = lock_user(addr, l, 0);
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            p = lock_user(addr, l, 1);
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, 
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    
    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }
        
        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |= 
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && 
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + 
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

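/* The cpu_physical_memory_read()/cpu_physical_memory_write() helpers
   used below are thin wrappers (declared in the headers) around
   cpu_physical_memory_rw() with is_write fixed to 0 or 1, e.g.:

       uint8_t buf[4];
       cpu_physical_memory_read(paddr, buf, 4);
*/
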
/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr, 
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    
    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }
        
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

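/* The ld*_phys/st*_phys helpers below read or write a single value in
   guest physical memory, dispatching between RAM and registered I/O
   handlers in the same way as cpu_physical_memory_rw(). */
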
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
        
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && 
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + 
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
        
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + 
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
        
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + 
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

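/* Intended use (illustrative, x86-flavoured): an MMU walker updating
   accessed/dirty bits in a guest PTE goes through the _notdirty
   variant so its bookkeeping write does not perturb dirty tracking:

       pte = ldl_phys(pte_addr);
       stl_phys_notdirty(pte_addr, pte | PG_ACCESSED_MASK);
*/
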
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
        
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr, 
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK), 
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

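/* This is the accessor debug clients such as the gdb stub go through:
   it translates one page at a time with cpu_get_phys_page_debug() and
   then uses the slow physical access path above. */
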
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;
    
    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count            %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n", 
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n", 
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", 
            cross_page, 
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count, 
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
}

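/* dump_exec_info() is meant for interactive inspection of the
   translator statistics (it backs the monitor's "info jit" command in
   the system emulator). */
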
#if !defined(CONFIG_USER_ONLY)

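/* Instantiate the code-fetch accessors (ldub_code, lduw_code,
   ldl_code, ldq_code): softmmu_template.h is expanded once per access
   size, with SHIFT selecting a 1 << SHIFT byte access and MMUSUFFIX
   naming the generated _cmmu (code MMU) variants. */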
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif