/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

/* Note: the VirtPage handling is obsolete and will be removed
   ASAP */
typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
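
/* Illustrative note: l1_map and l1_phys_map are two-level tables indexed
   by target page number.  Assuming the usual TARGET_PAGE_BITS of 12
   (4 KB pages), L2_BITS = 10 gives L1_BITS = 10, and a lookup
   decomposes as:

       page_index = addr >> TARGET_PAGE_BITS;
       l1_entry   = table[page_index >> L2_BITS];
       desc       = l1_entry[page_index & (L2_SIZE - 1)];

   which is what page_find()/page_find_alloc() below implement. */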

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
#if TARGET_LONG_BITS > 32
#define VIRT_L_BITS 9
#define VIRT_L_SIZE (1 << VIRT_L_BITS)
static void *l1_virt_map[VIRT_L_SIZE];
#else
static VirtPageDesc *l1_virt_map[L1_SIZE];
#endif
static unsigned int virt_valid_tag;
#endif

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
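
/* Usage sketch: callers index these tables by target page number, e.g.

       PageDesc *p = page_find(addr >> TARGET_PAGE_BITS);

   page_find() returns NULL while no TB has touched the page, whereas
   page_find_alloc() creates the second-level table on demand. */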

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
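
/* Note: when TARGET_PHYS_ADDR_SPACE_BITS > 32 (the sparc64/ppc64 cases
   above), the #if block inserts an extra top level in front of the
   two-level walk, consuming L1_BITS + L2_BITS more index bits, so
   physical page numbers wider than 32 bits still resolve to a
   PhysPageDesc. */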

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, ram_addr_t ram_addr,
                             target_ulong vaddr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);

static VirtPageDesc *virt_page_find_alloc(target_ulong index, int alloc)
{
#if TARGET_LONG_BITS > 32
    void **p, **lp;

    p = l1_virt_map;
    lp = p + ((index >> (5 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
    p = *lp;
    if (!p) {
        if (!alloc)
            return NULL;
        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
        *lp = p;
    }
    lp = p + ((index >> (4 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
    p = *lp;
    if (!p) {
        if (!alloc)
            return NULL;
        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
        *lp = p;
    }
    lp = p + ((index >> (3 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
    p = *lp;
    if (!p) {
        if (!alloc)
            return NULL;
        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
        *lp = p;
    }
    lp = p + ((index >> (2 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
    p = *lp;
    if (!p) {
        if (!alloc)
            return NULL;
        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
        *lp = p;
    }
    lp = p + ((index >> (1 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
    p = *lp;
    if (!p) {
        if (!alloc)
            return NULL;
        p = qemu_mallocz(sizeof(VirtPageDesc) * VIRT_L_SIZE);
        *lp = p;
    }
    return ((VirtPageDesc *)p) + (index & (VIRT_L_SIZE - 1));
#else
    VirtPageDesc *p, **lp;

    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_mallocz(sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
#endif
}

static inline VirtPageDesc *virt_page_find(target_ulong index)
{
    return virt_page_find_alloc(index, 0);
}

#if TARGET_LONG_BITS > 32
static void virt_page_flush_internal(void **p, int level)
{
    int i;
    if (level == 0) {
        VirtPageDesc *q = (VirtPageDesc *)p;
        for(i = 0; i < VIRT_L_SIZE; i++)
            q[i].valid_tag = 0;
    } else {
        level--;
        for(i = 0; i < VIRT_L_SIZE; i++) {
            if (p[i])
                virt_page_flush_internal(p[i], level);
        }
    }
}
#endif

static void virt_page_flush(void)
{
    virt_valid_tag++;

    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
#if TARGET_LONG_BITS > 32
        virt_page_flush_internal(l1_virt_map, 5);
#else
        {
            int i, j;
            VirtPageDesc *p;
            for(i = 0; i < L1_SIZE; i++) {
                p = l1_virt_map[i];
                if (p) {
                    for(j = 0; j < L2_SIZE; j++)
                        p[j].valid_tag = 0;
                }
            }
        }
#endif
    }
}
#else
static void virt_page_flush(void)
{
}
#endif
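
/* Note on virt_valid_tag: a VirtPageDesc entry is live only while its
   valid_tag equals the global virt_valid_tag, so virt_page_flush() can
   invalidate every entry in O(1) by bumping the counter.  Only when the
   counter wraps to 0 does it pay for a full sweep clearing the stale
   tags. */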

void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
    virt_page_flush();

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
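
/* Note on the tagged pointers walked below: the TB lists store a slot
   number in the low two bits of each pointer.  n = 0 or 1 selects
   page_next[n]/jmp_next[n] (a TB's first or second page, or jump slot),
   and the value (tb | 2) marks the head of the circular jmp list, which
   is why the walkers mask with ~3 and stop when (ptr & 3) == 2. */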

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can remove tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* remove this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
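
/* Worked example for set_bits(): marking bytes [5, 12) of a page, i.e.
   set_bits(tab, 5, 7), takes the split path since the range crosses a
   byte boundary: tab[0] |= 0xe0 (bits 5..7), no full 0xff bytes, then
   tab[1] |= 0x0f (bits 8..11).  One bitmap bit stands for one byte of
   guest code. */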

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
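
/* The bitmap built here (TARGET_PAGE_SIZE / 8 bytes, one bit per page
   byte) is what lets tb_invalidate_phys_page_fast() below test whether
   a small write actually overlaps translated code before paying for a
   full tb_invalidate_phys_page_range(). */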

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   in range [start, end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            saved_tb = env->current_tb;
            env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                   cpu_single_env->mem_write_vaddr, len,
                   cpu_single_env->eip,
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
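
/* Illustrative arithmetic: for a 4-byte write at page offset 0x123,
   offset >> 3 selects bitmap byte 0x24 and offset & 7 shifts out the 3
   low bits, so the test masks with (1 << 4) - 1 = 0xf; if none of those
   four code bits are set, the write cannot touch translated code and
   the expensive invalidation is skipped. */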

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the TB to the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, page_addr, virt_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Returns NULL when there are too
   many translation blocks or too much generated code; the caller must
   then flush the translation buffer. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

    
1008
/* add a new TB and link it to the physical page tables. phys_page2 is
1009
   (-1) to indicate that only one page contains the TB. */
1010
void tb_link_phys(TranslationBlock *tb, 
1011
                  target_ulong phys_pc, target_ulong phys_page2)
1012
{
1013
    unsigned int h;
1014
    TranslationBlock **ptb;
1015

    
1016
    /* add in the physical hash table */
1017
    h = tb_phys_hash_func(phys_pc);
1018
    ptb = &tb_phys_hash[h];
1019
    tb->phys_hash_next = *ptb;
1020
    *ptb = tb;
1021

    
1022
    /* add in the page list */
1023
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1024
    if (phys_page2 != -1)
1025
        tb_alloc_page(tb, 1, phys_page2);
1026
    else
1027
        tb->page_addr[1] = -1;
1028
#ifdef DEBUG_TB_CHECK
1029
    tb_page_check();
1030
#endif
1031
}
1032

    
1033
/* link the tb with the other TBs */
1034
void tb_link(TranslationBlock *tb)
1035
{
1036
#if !defined(CONFIG_USER_ONLY)
1037
    {
1038
        VirtPageDesc *vp;
1039
        target_ulong addr;
1040
        
1041
        /* save the code memory mappings (needed to invalidate the code) */
1042
        addr = tb->pc & TARGET_PAGE_MASK;
1043
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1044
#ifdef DEBUG_TLB_CHECK 
1045
        if (vp->valid_tag == virt_valid_tag &&
1046
            vp->phys_addr != tb->page_addr[0]) {
1047
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
1048
                   addr, tb->page_addr[0], vp->phys_addr);
1049
        }
1050
#endif
1051
        vp->phys_addr = tb->page_addr[0];
1052
        if (vp->valid_tag != virt_valid_tag) {
1053
            vp->valid_tag = virt_valid_tag;
1054
#if !defined(CONFIG_SOFTMMU)
1055
            vp->prot = 0;
1056
#endif
1057
        }
1058
        
1059
        if (tb->page_addr[1] != -1) {
1060
            addr += TARGET_PAGE_SIZE;
1061
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1062
#ifdef DEBUG_TLB_CHECK 
1063
            if (vp->valid_tag == virt_valid_tag &&
1064
                vp->phys_addr != tb->page_addr[1]) { 
1065
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
1066
                       addr, tb->page_addr[1], vp->phys_addr);
1067
            }
1068
#endif
1069
            vp->phys_addr = tb->page_addr[1];
1070
            if (vp->valid_tag != virt_valid_tag) {
1071
                vp->valid_tag = virt_valid_tag;
1072
#if !defined(CONFIG_SOFTMMU)
1073
                vp->prot = 0;
1074
#endif
1075
            }
1076
        }
1077
    }
1078
#endif
1079

    
1080
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1081
    tb->jmp_next[0] = NULL;
1082
    tb->jmp_next[1] = NULL;
1083
#ifdef USE_CODE_COPY
1084
    tb->cflags &= ~CF_FP_USED;
1085
    if (tb->cflags & CF_TB_FP_USED)
1086
        tb->cflags |= CF_FP_USED;
1087
#endif
1088

    
1089
    /* init original jump addresses */
1090
    if (tb->tb_next_offset[0] != 0xffff)
1091
        tb_reset_jump(tb, 0);
1092
    if (tb->tb_next_offset[1] != 0xffff)
1093
        tb_reset_jump(tb, 1);
1094
}
1095

    
1096
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1097
   tb[1].tc_ptr. Return NULL if not found */
1098
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1099
{
1100
    int m_min, m_max, m;
1101
    unsigned long v;
1102
    TranslationBlock *tb;
1103

    
1104
    if (nb_tbs <= 0)
1105
        return NULL;
1106
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1107
        tc_ptr >= (unsigned long)code_gen_ptr)
1108
        return NULL;
1109
    /* binary search (cf Knuth) */
1110
    m_min = 0;
1111
    m_max = nb_tbs - 1;
1112
    while (m_min <= m_max) {
1113
        m = (m_min + m_max) >> 1;
1114
        tb = &tbs[m];
1115
        v = (unsigned long)tb->tc_ptr;
1116
        if (v == tc_ptr)
1117
            return tb;
1118
        else if (tc_ptr < v) {
1119
            m_max = m - 1;
1120
        } else {
1121
            m_min = m + 1;
1122
        }
1123
    } 
1124
    return &tbs[m_max];
1125
}
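
/* The binary search works because TBs are carved sequentially out of
   code_gen_buffer, so tbs[] is sorted by tc_ptr; on a miss, tbs[m_max]
   is the last TB starting at or before tc_ptr.  This is how a host PC
   taken from a write fault (e.g. env->mem_write_pc in
   tb_invalidate_phys_page_range() above) is mapped back to its TB. */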

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}
#endif

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all I/O port accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
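
/* Example: cpu_str_to_log_mask("in_asm,op") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, "all" selects every entry in
   cpu_log_items[], and any unknown name makes the whole call return 0. */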

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    virt_page_flush();
    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TBs at this
       virtual address */

    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, ram_addr_t ram_addr,
                             target_ulong vaddr)
{
    int i;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], vaddr);
    tlb_protect_code1(&env->tlb_write[1][i], vaddr);

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_set_notdirty(env, ram_addr);
    }
#endif
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] &= ~CODE_DIRTY_FLAG;

#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (vaddr < MMAP_AREA_END)
        mprotect((void *)vaddr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
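
/* Protection protocol, in short: tlb_protect_code() clears
   CODE_DIRTY_FLAG and retags the write TLB entries IO_MEM_NOTDIRTY, so
   the next store to the page leaves the fast path and can trigger TB
   invalidation; once a page holds no code any more,
   tlb_unprotect_code_phys() sets the flag back and fast writes resume. */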

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
    env = cpu_single_env;
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->address & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->address |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_write[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_write[1][i]);
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}
1645

    
1646
/* add a new TLB entry. At most one entry for a given virtual address
1647
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1648
   (can only happen in non SOFTMMU mode for I/O pages or pages
1649
   conflicting with the host address space). */
1650
int tlb_set_page(CPUState *env, target_ulong vaddr, 
1651
                 target_phys_addr_t paddr, int prot, 
1652
                 int is_user, int is_softmmu)
1653
{
1654
    PhysPageDesc *p;
1655
    unsigned long pd;
1656
    unsigned int index;
1657
    target_ulong address;
1658
    target_phys_addr_t addend;
1659
    int ret;
1660

    
1661
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1662
    if (!p) {
1663
        pd = IO_MEM_UNASSIGNED;
1664
    } else {
1665
        pd = p->phys_offset;
1666
    }
1667
#if defined(DEBUG_TLB)
1668
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1669
           vaddr, paddr, prot, is_user, is_softmmu, pd);
1670
#endif
1671

    
1672
    ret = 0;
1673
#if !defined(CONFIG_SOFTMMU)
1674
    if (is_softmmu) 
1675
#endif
1676
    {
1677
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1678
            /* IO memory case */
1679
            address = vaddr | pd;
1680
            addend = paddr;
1681
        } else {
1682
            /* standard memory */
1683
            address = vaddr;
1684
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1685
        }
1686
        
1687
        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1688
        addend -= vaddr;
1689
        if (prot & PAGE_READ) {
1690
            env->tlb_read[is_user][index].address = address;
1691
            env->tlb_read[is_user][index].addend = addend;
1692
        } else {
1693
            env->tlb_read[is_user][index].address = -1;
1694
            env->tlb_read[is_user][index].addend = -1;
1695
        }
1696
        if (prot & PAGE_WRITE) {
1697
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
1698
                /* ROM: access is ignored (same as unassigned) */
1699
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
1700
                env->tlb_write[is_user][index].addend = addend;
1701
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && 
1702
                       !cpu_physical_memory_is_dirty(pd)) {
1703
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
1704
                env->tlb_write[is_user][index].addend = addend;
1705
            } else {
1706
                env->tlb_write[is_user][index].address = address;
1707
                env->tlb_write[is_user][index].addend = addend;
1708
            }
1709
        } else {
1710
            env->tlb_write[is_user][index].address = -1;
1711
            env->tlb_write[is_user][index].addend = -1;
1712
        }
1713
    }
1714
#if !defined(CONFIG_SOFTMMU)
1715
    else {
1716
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1717
            /* IO access: no mapping is done as it will be handled by the
1718
               soft MMU */
1719
            if (!(env->hflags & HF_SOFTMMU_MASK))
1720
                ret = 2;
1721
        } else {
1722
            void *map_addr;
1723

    
1724
            if (vaddr >= MMAP_AREA_END) {
1725
                ret = 2;
1726
            } else {
1727
                if (prot & PROT_WRITE) {
1728
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || 
1729
#if defined(TARGET_HAS_SMC) || 1
1730
                        first_tb ||
1731
#endif
1732
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && 
1733
                         !cpu_physical_memory_is_dirty(pd))) {
1734
                        /* ROM: we do as if code was inside */
1735
                        /* if code is present, we only map as read only and save the
1736
                           original mapping */
1737
                        VirtPageDesc *vp;
1738
                        
1739
                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1740
                        vp->phys_addr = pd;
1741
                        vp->prot = prot;
1742
                        vp->valid_tag = virt_valid_tag;
1743
                        prot &= ~PAGE_WRITE;
1744
                    }
1745
                }
1746
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot, 
1747
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1748
                if (map_addr == MAP_FAILED) {
1749
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
1750
                              paddr, vaddr);
1751
                }
1752
            }
1753
        }
1754
    }
1755
#endif
1756
    return ret;
1757
}
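
#if 0 /* usage sketch only, not compiled: a target's MMU fault handler
         typically fills the TLB like this once it has walked the guest
         page tables. 'example_translate' is a hypothetical per-target
         walker, not part of this file. */
static int example_handle_mmu_fault(CPUState *env, target_ulong vaddr,
                                    int is_user, int is_softmmu)
{
    target_phys_addr_t paddr;
    int prot;

    if (example_translate(env, vaddr, &paddr, &prot) < 0)
        return 1; /* no mapping: raise a guest page fault instead */
    /* one entry per target page, at most one per virtual address */
    return tlb_set_page(env, vaddr & TARGET_PAGE_MASK,
                        paddr & TARGET_PAGE_MASK, prot,
                        is_user, is_softmmu);
}
#endif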

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}
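
#if 0 /* usage sketch only, not compiled: the host SIGSEGV handler
         gives page_unprotect() a chance to resolve write faults on
         pages that were write-protected because they contain
         translated code, before reporting a real guest fault. */
static int example_handle_host_write_fault(unsigned long host_addr,
                                           unsigned long pc, void *puc)
{
    if (page_unprotect(host_addr, pc, puc))
        return 1; /* handled: just restart the faulting instruction */
    return 0;     /* not ours: deliver the fault to the guest */
}
#endif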

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
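
#if 0 /* usage sketch only, not compiled: dump the guest mappings in a
         /proc/<pid>/maps-like format, e.g. from a debug hook. A
         typical output line looks like "08048000-08050000 00008000 r-x". */
static void example_dump_mappings(void)
{
    page_dump(stderr);
}
#endif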

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the page was not writable and is about to become
           writable, we must invalidate the translated code inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
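
#if 0 /* usage sketch only, not compiled: how an mmap()/mprotect()
         emulation layer might record a new guest mapping. 'prot' is
         assumed to already be in the PAGE_* flag encoding. If the
         range is writable, page_set_flags() also sets PAGE_WRITE_ORG
         and invalidates translated code in pages becoming writable. */
static void example_record_mapping(unsigned long start,
                                   unsigned long len, int prot)
{
    page_set_flags(start, start + len, prot | PAGE_VALID);
}
#endif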

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}
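
#if 0 /* usage sketch only, not compiled: a syscall emulation that lets
         the host kernel write into guest memory must unprotect the
         destination first, otherwise the host write would fault on
         pages write-protected for translated code. */
static long example_emulate_read(int fd, uint8_t *guest_buf,
                                 unsigned long count)
{
    page_unprotect_range(guest_buf, count);
    return read(fd, guest_buf, count);
}
#endif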

static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
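
#if 0 /* usage sketch only, not compiled: typical board setup. The
         addresses, sizes and offsets are illustrative. For RAM and
         ROM, phys_offset is an offset into phys_ram_base; for ROM it
         is tagged with IO_MEM_ROM so that guest writes are ignored. */
static void example_board_map(void)
{
    /* 32 MB of guest RAM backed by the start of phys_ram_base */
    cpu_register_physical_memory(0x00000000, 32 * 1024 * 1024,
                                 0 | IO_MEM_RAM);
    /* a 128 kB ROM placed just after the RAM in phys_ram_base */
    cpu_register_physical_memory(0xfffe0000, 0x20000,
                                 (32 * 1024 * 1024) | IO_MEM_ROM);
}
#endif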

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
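
#if 0 /* usage sketch only, not compiled: registering a device's MMIO
         callbacks and mapping them at a physical address. The
         my_read*/my_write* callbacks, the address and the size are
         illustrative, not part of this file. */
static CPUReadMemoryFunc *example_mmio_read[3] = {
    my_readb, my_readw, my_readl,
};
static CPUWriteMemoryFunc *example_mmio_write[3] = {
    my_writeb, my_writew, my_writel,
};

static void example_device_init(void *dev_state)
{
    int io;

    /* io_index 0: allocate a fresh I/O zone */
    io = cpu_register_io_memory(0, example_mmio_read, example_mmio_write,
                                dev_state);
    if (io != -1)
        cpu_register_physical_memory(0xd0000000, 0x1000, io);
}
#endif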

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* copy at most up to the end of the current page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
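
#if 0 /* usage sketch only, not compiled: a DMA-capable device copying
         a descriptor out of guest physical memory. The
         cpu_physical_memory_read()/write() helpers are thin wrappers
         around this function. */
static void example_fetch_descriptor(target_phys_addr_t desc_paddr,
                                     uint8_t *buf, int len)
{
    cpu_physical_memory_rw(desc_paddr, buf, len, 0 /* is_write = 0 */);
}
#endif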

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* XXX: optimize */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    uint64_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 8);
    return tswap64(val);
}

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
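
#if 0 /* usage sketch only, not compiled: a page-table walker setting
         the accessed bit in a guest PTE. stl_phys_notdirty() leaves
         the dirty bitmap alone, so the walker's own bookkeeping writes
         do not trigger dirty tracking or code invalidation for the PTE
         page. The 0x20 accessed bit is the x86 encoding, used here
         purely as an illustration. */
static void example_set_pte_accessed(target_phys_addr_t pte_addr,
                                     uint32_t pte)
{
    stl_phys_notdirty(pte_addr, pte | 0x20);
}
#endif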

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
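
#if 0 /* usage sketch only, not compiled: how a debugger stub reads
         guest virtual memory; the target MMU translation is done page
         by page via cpu_get_phys_page_debug(). */
static int example_debugger_read(CPUState *env, target_ulong vaddr,
                                 uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0 /* read */);
}
#endif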

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count            %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
}
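
#if 0 /* usage sketch only, not compiled: printing the translation
         statistics to stderr; plain fprintf already matches the
         cpu_fprintf callback signature. */
static void example_dump_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif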

#if !defined(CONFIG_USER_ONLY)

/* instantiate the code access soft MMU helpers (the _cmmu variants
   used when fetching target instructions) for 8, 16, 32 and 64 bit
   accesses: SHIFT selects an access size of 1 << SHIFT bytes */
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif