/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

143
{
144
    /* NOTE: we can always suppose that qemu_host_page_size >=
145
       TARGET_PAGE_SIZE */
146
#ifdef _WIN32
147
    {
148
        SYSTEM_INFO system_info;
149
        DWORD old_protect;
150
        
151
        GetSystemInfo(&system_info);
152
        qemu_real_host_page_size = system_info.dwPageSize;
153
        
154
        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
155
                       PAGE_EXECUTE_READWRITE, &old_protect);
156
    }
157
#else
158
    qemu_real_host_page_size = getpagesize();
159
    {
160
        unsigned long start, end;
161

    
162
        start = (unsigned long)code_gen_buffer;
163
        start &= ~(qemu_real_host_page_size - 1);
164
        
165
        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
166
        end += qemu_real_host_page_size - 1;
167
        end &= ~(qemu_real_host_page_size - 1);
168
        
169
        mprotect((void *)start, end - start, 
170
                 PROT_READ | PROT_WRITE | PROT_EXEC);
171
    }
172
#endif
173

    
174
    if (qemu_host_page_size == 0)
175
        qemu_host_page_size = qemu_real_host_page_size;
176
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
177
        qemu_host_page_size = TARGET_PAGE_SIZE;
178
    qemu_host_page_bits = 0;
179
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
180
        qemu_host_page_bits++;
181
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
182
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
183
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
184
}
185

    
186
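/* return the PageDesc for the given target page index, allocating
   the level 2 table on first access */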
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

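/* return the PhysPageDesc for the given physical page index; if
   'alloc' is set, missing table levels are allocated and new entries
   default to IO_MEM_UNASSIGNED */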
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

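/* register a CPU: on the first call, initialize code generation, the
   page tables and the I/O memory, then append 'env' to the global
   CPU list and assign it an index */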
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    *penv = env;
}

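/* free the code bitmap of a page and reset its write access counter */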
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* walk the circular list of jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

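/* unlink 'tb' from a page TB list; the low 2 bits of each list
   pointer encode which page slot holds the next link */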
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

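/* remove jump 'n' of 'tb' from the circular list of jumps to its
   target TB */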
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can remove tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the jump cache of each CPU */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* remove this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

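/* set bits [start, start + len) in the bitmap 'tab' */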
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

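/* build the bitmap of bytes covered by TBs in a page, used to avoid
   useless invalidations when a write does not touch translated code */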
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end). NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

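/* if jump 'n' of 'tb' is chained, unlink it from the incoming jump
   list of its destination and recursively unchain the destination TB */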
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
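/* invalidate the TBs that intersect the breakpoint address so that
   the code is retranslated with the breakpoint check */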
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong addr, pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

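/* print a fatal error message, dump the CPU state and abort */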
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

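/* invalidate a TLB entry if it matches the given virtual address */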
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

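/* clear the given dirty flags in the range [start, end) and update
   the TLBs so that the dirty bits will be set again on the next write */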
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

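/* switch a RAM TLB entry back to the slow write path if its page is
   no longer dirty */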
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1413
{
1414
    ram_addr_t ram_addr;
1415

    
1416
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1417
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + 
1418
            tlb_entry->addend - (unsigned long)phys_ram_base;
1419
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1420
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1421
        }
1422
    }
1423
}
1424

    
1425
/* update the TLB according to the current state of the dirty bits */
1426
void cpu_tlb_update_dirty(CPUState *env)
1427
{
1428
    int i;
1429
    for(i = 0; i < CPU_TLB_SIZE; i++)
1430
        tlb_update_dirty(&env->tlb_table[0][i]);
1431
    for(i = 0; i < CPU_TLB_SIZE; i++)
1432
        tlb_update_dirty(&env->tlb_table[1][i]);
1433
}
1434

    
1435
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, 
1436
                                  unsigned long start)
1437
{
1438
    unsigned long addr;
1439
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1440
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1441
        if (addr == start) {
1442
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1443
        }
1444
    }
1445
}
1446

    
1447
/* update the TLB corresponding to virtual page vaddr and phys addr
1448
   addr so that it is no longer dirty */
1449
static inline void tlb_set_dirty(CPUState *env,
1450
                                 unsigned long addr, target_ulong vaddr)
1451
{
1452
    int i;
1453

    
1454
    addr &= TARGET_PAGE_MASK;
1455
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1456
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
1457
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
1458
}
1459

    
1460
/* add a new TLB entry. At most one entry for a given virtual address
1461
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1462
   (can only happen in non SOFTMMU mode for I/O pages or pages
1463
   conflicting with the host address space). */
1464
int tlb_set_page_exec(CPUState *env, target_ulong vaddr, 
1465
                      target_phys_addr_t paddr, int prot, 
1466
                      int is_user, int is_softmmu)
1467
{
1468
    PhysPageDesc *p;
1469
    unsigned long pd;
1470
    unsigned int index;
1471
    target_ulong address;
1472
    target_phys_addr_t addend;
1473
    int ret;
1474
    CPUTLBEntry *te;
1475

    
1476
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1477
    if (!p) {
1478
        pd = IO_MEM_UNASSIGNED;
1479
    } else {
1480
        pd = p->phys_offset;
1481
    }
1482
#if defined(DEBUG_TLB)
1483
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1484
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
1485
#endif
1486

    
1487
    ret = 0;
1488
#if !defined(CONFIG_SOFTMMU)
1489
    if (is_softmmu) 
1490
#endif
1491
    {
1492
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1493
            /* IO memory case */
1494
            address = vaddr | pd;
1495
            addend = paddr;
1496
        } else {
1497
            /* standard memory */
1498
            address = vaddr;
1499
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1500
        }
1501
        
1502
        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1503
        addend -= vaddr;
1504
        te = &env->tlb_table[is_user][index];
1505
        te->addend = addend;
1506
        if (prot & PAGE_READ) {
1507
            te->addr_read = address;
1508
        } else {
1509
            te->addr_read = -1;
1510
        }
1511
        if (prot & PAGE_EXEC) {
1512
            te->addr_code = address;
1513
        } else {
1514
            te->addr_code = -1;
1515
        }
1516
        if (prot & PAGE_WRITE) {
1517
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || 
1518
                (pd & IO_MEM_ROMD)) {
1519
                /* write access calls the I/O callback */
1520
                te->addr_write = vaddr | 
1521
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1522
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && 
1523
                       !cpu_physical_memory_is_dirty(pd)) {
1524
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1525
            } else {
1526
                te->addr_write = address;
1527
            }
1528
        } else {
1529
            te->addr_write = -1;
1530
        }
1531
    }
1532
#if !defined(CONFIG_SOFTMMU)
1533
    else {
1534
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1535
            /* IO access: no mapping is done as it will be handled by the
1536
               soft MMU */
1537
            if (!(env->hflags & HF_SOFTMMU_MASK))
1538
                ret = 2;
1539
        } else {
1540
            void *map_addr;
1541

    
1542
            if (vaddr >= MMAP_AREA_END) {
1543
                ret = 2;
1544
            } else {
1545
                if (prot & PROT_WRITE) {
1546
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || 
1547
#if defined(TARGET_HAS_SMC) || 1
1548
                        first_tb ||
1549
#endif
1550
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && 
1551
                         !cpu_physical_memory_is_dirty(pd))) {
1552
                        /* ROM: we do as if code was inside */
1553
                        /* if code is present, we only map as read only and save the
1554
                           original mapping */
1555
                        VirtPageDesc *vp;
1556
                        
1557
                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1558
                        vp->phys_addr = pd;
1559
                        vp->prot = prot;
1560
                        vp->valid_tag = virt_valid_tag;
1561
                        prot &= ~PAGE_WRITE;
1562
                    }
1563
                }
1564
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot, 
1565
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1566
                if (map_addr == MAP_FAILED) {
1567
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
1568
                              paddr, vaddr);
1569
                }
1570
            }
1571
        }
1572
    }
1573
#endif
1574
    return ret;
1575
}
1576

    
1577
/* called from signal handler: invalidate the code and unprotect the
1578
   page. Return TRUE if the fault was succesfully handled. */
1579
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1580
{
1581
#if !defined(CONFIG_SOFTMMU)
1582
    VirtPageDesc *vp;
1583

    
1584
#if defined(DEBUG_TLB)
1585
    printf("page_unprotect: addr=0x%08x\n", addr);
1586
#endif
1587
    addr &= TARGET_PAGE_MASK;
1588

    
1589
    /* if it is not mapped, no need to worry here */
1590
    if (addr >= MMAP_AREA_END)
1591
        return 0;
1592
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1593
    if (!vp)
1594
        return 0;
1595
    /* NOTE: in this case, validate_tag is _not_ tested as it
1596
       validates only the code TLB */
1597
    if (vp->valid_tag != virt_valid_tag)
1598
        return 0;
1599
    if (!(vp->prot & PAGE_WRITE))
1600
        return 0;
1601
#if defined(DEBUG_TLB)
1602
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n", 
1603
           addr, vp->phys_addr, vp->prot);
1604
#endif
1605
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1606
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1607
                  (unsigned long)addr, vp->prot);
1608
    /* set the dirty bit */
1609
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1610
    /* flush the code inside */
1611
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1612
    return 1;
1613
#else
1614
    return 0;
1615
#endif
1616
}
1617

    
1618
#else
1619

    
1620
void tlb_flush(CPUState *env, int flush_global)
1621
{
1622
}
1623

    
1624
void tlb_flush_page(CPUState *env, target_ulong addr)
1625
{
1626
}
1627

    
1628
int tlb_set_page_exec(CPUState *env, target_ulong vaddr, 
1629
                      target_phys_addr_t paddr, int prot, 
1630
                      int is_user, int is_softmmu)
1631
{
1632
    return 0;
1633
}
1634

    
1635
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

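/* Illustrative sketch of a caller, in the spirit of the user-mode
   access helpers: the page must be mapped and readable before it is
   touched. example_can_read is a hypothetical name, not part of this
   file's API. */
static int example_can_read(target_ulong addr)
{
    int flags;

    flags = page_get_flags(addr);
    /* unmapped pages report flags == 0 */
    return (flags & PAGE_VALID) && (flags & PAGE_READ);
}
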
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if a write-protected page becomes writable, we must
           invalidate the translated code inside it */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

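/* Illustrative sketch of a caller: a guest mmap() emulation would
   publish a freshly created mapping roughly like this.
   example_publish_mapping and the chosen protection bits are
   hypothetical placeholders. */
static void example_publish_mapping(target_ulong start, target_ulong len)
{
    /* PAGE_WRITE_ORG is derived automatically from PAGE_WRITE */
    page_set_flags(start, start + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
}
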
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

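/* Illustrative sketch of the caller side: the host SIGSEGV handling
   code funnels write faults here before deciding whether the fault
   belongs to the guest. example_handle_write_fault is a hypothetical
   name; the real entry points live in the host signal handlers. */
static int example_handle_write_fault(target_ulong fault_addr,
                                      unsigned long host_pc, void *sigctx)
{
    /* a fault on a page we write-protected to guard translated code
       is resolved by unprotecting it and flushing that code */
    if (page_unprotect(fault_addr, host_pc, sigctx))
        return 1; /* handled: re-execute the faulting instruction */
    return 0;     /* genuine fault: forward it to the guest */
}
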
/* call this function when system calls directly modify a memory area */
/* ??? This should be redundant now we have lock_user.  */
void page_unprotect_range(target_ulong data, target_ulong data_size)
{
    target_ulong start, end, addr;

    start = data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
            (phys_offset & IO_MEM_ROMD))
            phys_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

/* XXX: temporary until new memory mapping API */
uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

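/* Illustrative sketch of a caller: checking whether a guest physical
   page is plain RAM before accessing it directly through
   phys_ram_base. example_page_is_ram is a hypothetical helper name. */
static int example_page_is_ram(target_phys_addr_t addr)
{
    uint32_t pd;

    pd = cpu_get_physical_page_desc(addr);
    /* the low bits hold the io_index; IO_MEM_RAM means ordinary RAM */
    return (pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM;
}
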
/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(unsigned int size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
                size, phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

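/* Illustrative sketch of a caller: how a machine init function
   typically carves guest RAM out of the preallocated pool and maps it
   at guest physical address 0. example_register_ram and the base
   address are hypothetical placeholders. */
static void example_register_ram(ram_addr_t ram_size)
{
    ram_addr_t ram_offset;

    ram_offset = qemu_ram_alloc(ram_size);
    /* low bits of phys_offset == IO_MEM_RAM mark the range as RAM */
    cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);
}
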
void qemu_ram_free(ram_addr_t addr)
{
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read  0x%08x\n", (int)addr);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding I/O zone is modified. If it is zero, a new I/O zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}

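/* Illustrative sketch of a caller: registering a tiny memory-mapped
   device. All example_* names, the register behaviour and the 0x1000
   region size are hypothetical placeholders. */
static uint32_t example_dev_read(void *opaque, target_phys_addr_t addr)
{
    /* a real device would decode 'addr' against its register map */
    return 0;
}

static void example_dev_write(void *opaque, target_phys_addr_t addr,
                              uint32_t val)
{
}

static CPUReadMemoryFunc *example_dev_readfn[3] = {
    example_dev_read,   /* byte */
    example_dev_read,   /* word */
    example_dev_read,   /* dword */
};

static CPUWriteMemoryFunc *example_dev_writefn[3] = {
    example_dev_write,
    example_dev_write,
    example_dev_write,
};

static void example_dev_init(target_phys_addr_t base)
{
    int io_index;

    /* io_index == 0 asks for a fresh I/O zone */
    io_index = cpu_register_io_memory(0, example_dev_readfn,
                                      example_dev_writefn, NULL);
    cpu_register_physical_memory(base, 0x1000, io_index);
}
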
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* only the chunk that lies within this page is copied */
            p = lock_user(addr, l, 0);
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            p = lock_user(addr, l, 1);
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

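/* Illustrative sketch of a caller: DMA-style access from a device
   model through the cpu_physical_memory_read/write wrappers around
   cpu_physical_memory_rw(). The 0x1000 buffer address is a
   hypothetical placeholder. */
static void example_dma_roundtrip(void)
{
    uint8_t buf[16];

    cpu_physical_memory_read(0x1000, buf, sizeof(buf));  /* guest -> host */
    cpu_physical_memory_write(0x1000, buf, sizeof(buf)); /* host -> guest */
}
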
/* used for ROM loading: can write to RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

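/* Illustrative sketch of a caller: a firmware loader pushing an image
   into a ROM region; a plain cpu_physical_memory_write() would be
   ignored for ROM pages. example_load_firmware is a hypothetical
   name. */
static void example_load_firmware(target_phys_addr_t rom_base,
                                  const uint8_t *image, int image_size)
{
    cpu_physical_memory_write_rom(rom_base, image, image_size);
}
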
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

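/* Illustrative sketch of a caller: an MMU helper setting the accessed
   bit in a guest PTE without disturbing dirty tracking of the PTE
   page. example_set_pte_accessed and the 0x20 bit (the x86 accessed
   bit) are used here purely for illustration. */
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte;

    pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | 0x20);
}
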
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

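/* Illustrative sketch of a caller: a debugger stub (e.g. the gdb
   server) reading guest virtual memory, tolerating unmapped pages.
   example_debug_read is a hypothetical name. */
static int example_debug_read(CPUState *env, target_ulong vaddr,
                              uint8_t *out, int len)
{
    /* returns -1 as soon as a page has no physical mapping */
    return cpu_memory_rw_debug(env, vaddr, out, len, 0);
}
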
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count            %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif