root / exec.c @ afc7df11

1
/*
2
 *  virtual page mapping and translated block handling
3
 * 
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include "config.h"
21
#ifdef _WIN32
22
#include <windows.h>
23
#else
24
#include <sys/types.h>
25
#include <sys/mman.h>
26
#endif
27
#include <stdlib.h>
28
#include <stdio.h>
29
#include <stdarg.h>
30
#include <string.h>
31
#include <errno.h>
32
#include <unistd.h>
33
#include <inttypes.h>
34

    
35
#include "cpu.h"
36
#include "exec-all.h"
37

    
38
//#define DEBUG_TB_INVALIDATE
39
//#define DEBUG_FLUSH
40
//#define DEBUG_TLB
41

    
42
/* make various TB consistency checks */
43
//#define DEBUG_TB_CHECK 
44
//#define DEBUG_TLB_CHECK 
45

    
46
/* threshold to flush the translated code buffer */
47
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
48

    
49
#define SMC_BITMAP_USE_THRESHOLD 10
50

    
51
#define MMAP_AREA_START        0x00000000
52
#define MMAP_AREA_END          0xa8000000
53

    
54
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
55
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
56
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
57
int nb_tbs;
58
/* any access to the tbs or the page table must use this lock */
59
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
60

    
61
uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
62
uint8_t *code_gen_ptr;
63

    
64
int phys_ram_size;
65
int phys_ram_fd;
66
uint8_t *phys_ram_base;
67
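/* one byte of dirty flags per target page of guest RAM, indexed by
   physical address >> TARGET_PAGE_BITS; the slow write path sets it to
   0xff (see tlb_set_dirty()) and cpu_physical_memory_reset_dirty()
   clears selected flag bits */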
uint8_t *phys_ram_dirty;
68

    
69
typedef struct PageDesc {
70
    /* list of TBs intersecting this ram page */
71
    TranslationBlock *first_tb;
72
    /* in order to optimize self modifying code handling, we count the
73
       number of code write accesses to this page before using a bitmap */
74
    unsigned int code_write_count;
75
    uint8_t *code_bitmap;
76
#if defined(CONFIG_USER_ONLY)
77
    unsigned long flags;
78
#endif
79
} PageDesc;
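/* NOTE: the 'first_tb' list uses tagged pointers: a TB can span two
   physical pages, so each list entry stores the TB pointer with its low
   bits set to the page slot (0 or 1) and tb->page_next[slot] links to the
   next entry (see tb_alloc_page() and tb_page_remove()) */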
80

    
81
typedef struct PhysPageDesc {
82
    /* offset in host memory of the page + io_index in the low 12 bits */
83
    unsigned long phys_offset;
84
} PhysPageDesc;
85

    
86
typedef struct VirtPageDesc {
87
    /* physical address of code page. It is valid only if 'valid_tag'
88
       matches 'virt_valid_tag' */ 
89
    target_ulong phys_addr; 
90
    unsigned int valid_tag;
91
#if !defined(CONFIG_SOFTMMU)
92
    /* original page access rights. It is valid only if 'valid_tag'
93
       matches 'virt_valid_tag' */
94
    unsigned int prot;
95
#endif
96
} VirtPageDesc;
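/* NOTE: VirtPageDesc entries are invalidated lazily: virt_page_flush()
   simply increments virt_valid_tag, which makes every entry with a stale
   'valid_tag' invalid; the stored tags are only reset when the counter
   wraps around to 0 */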
97

    
98
#define L2_BITS 10
99
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
100

    
101
#define L1_SIZE (1 << L1_BITS)
102
#define L2_SIZE (1 << L2_BITS)
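/* example: with 4 KB target pages (TARGET_PAGE_BITS == 12), a 32 bit
   address splits into a 10 bit L1 index, a 10 bit L2 index and a 12 bit
   page offset, so page_find(addr >> TARGET_PAGE_BITS) indexes
   l1_map[index >> L2_BITS] and then entry (index & (L2_SIZE - 1)) */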
103

    
104
static void io_mem_init(void);
105

    
106
unsigned long qemu_real_host_page_size;
107
unsigned long qemu_host_page_bits;
108
unsigned long qemu_host_page_size;
109
unsigned long qemu_host_page_mask;
110

    
111
/* XXX: for system emulation, it could just be an array */
112
static PageDesc *l1_map[L1_SIZE];
113
PhysPageDesc **l1_phys_map;
114

    
115
#if !defined(CONFIG_USER_ONLY)
116
static VirtPageDesc *l1_virt_map[L1_SIZE];
117
static unsigned int virt_valid_tag;
118
#endif
119

    
120
/* io memory support */
121
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
122
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
123
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
124
static int io_mem_nb;
125

    
126
/* log support */
127
char *logfilename = "/tmp/qemu.log";
128
FILE *logfile;
129
int loglevel;
130

    
131
/* statistics */
132
static int tlb_flush_count;
133
static int tb_flush_count;
134
static int tb_phys_invalidate_count;
135

    
136
static void page_init(void)
137
{
138
    /* NOTE: we can always suppose that qemu_host_page_size >=
139
       TARGET_PAGE_SIZE */
140
#ifdef _WIN32
141
    {
142
        SYSTEM_INFO system_info;
143
        DWORD old_protect;
144
        
145
        GetSystemInfo(&system_info);
146
        qemu_real_host_page_size = system_info.dwPageSize;
147
        
148
        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
149
                       PAGE_EXECUTE_READWRITE, &old_protect);
150
    }
151
#else
152
    qemu_real_host_page_size = getpagesize();
153
    {
154
        unsigned long start, end;
155

    
156
        start = (unsigned long)code_gen_buffer;
157
        start &= ~(qemu_real_host_page_size - 1);
158
        
159
        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
160
        end += qemu_real_host_page_size - 1;
161
        end &= ~(qemu_real_host_page_size - 1);
162
        
163
        mprotect((void *)start, end - start, 
164
                 PROT_READ | PROT_WRITE | PROT_EXEC);
165
    }
166
#endif
167

    
168
    if (qemu_host_page_size == 0)
169
        qemu_host_page_size = qemu_real_host_page_size;
170
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
171
        qemu_host_page_size = TARGET_PAGE_SIZE;
172
    qemu_host_page_bits = 0;
173
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
174
        qemu_host_page_bits++;
175
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
176
#if !defined(CONFIG_USER_ONLY)
177
    virt_valid_tag = 1;
178
#endif
179
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(PhysPageDesc *));
180
    memset(l1_phys_map, 0, L1_SIZE * sizeof(PhysPageDesc *));
181
}
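/* NOTE: the mprotect()/VirtualProtect() calls above make the statically
   allocated code_gen_buffer executable, since the host CPU will jump
   directly into the code generated there */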
182

    
183
static inline PageDesc *page_find_alloc(unsigned int index)
184
{
185
    PageDesc **lp, *p;
186

    
187
    lp = &l1_map[index >> L2_BITS];
188
    p = *lp;
189
    if (!p) {
190
        /* allocate if not found */
191
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
192
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
193
        *lp = p;
194
    }
195
    return p + (index & (L2_SIZE - 1));
196
}
197

    
198
static inline PageDesc *page_find(unsigned int index)
199
{
200
    PageDesc *p;
201

    
202
    p = l1_map[index >> L2_BITS];
203
    if (!p)
204
        return 0;
205
    return p + (index & (L2_SIZE - 1));
206
}
207

    
208
static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
209
{
210
    PhysPageDesc **lp, *p;
211

    
212
    lp = &l1_phys_map[index >> L2_BITS];
213
    p = *lp;
214
    if (!p) {
215
        /* allocate if not found */
216
        p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
217
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
218
        *lp = p;
219
    }
220
    return p + (index & (L2_SIZE - 1));
221
}
222

    
223
static inline PhysPageDesc *phys_page_find(unsigned int index)
224
{
225
    PhysPageDesc *p;
226

    
227
    p = l1_phys_map[index >> L2_BITS];
228
    if (!p)
229
        return 0;
230
    return p + (index & (L2_SIZE - 1));
231
}
232

    
233
#if !defined(CONFIG_USER_ONLY)
234
static void tlb_protect_code(CPUState *env, target_ulong addr);
235
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);
236

    
237
static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
238
{
239
    VirtPageDesc **lp, *p;
240

    
241
    /* XXX: should not truncate for 64 bit addresses */
242
#if TARGET_LONG_BITS > 32
243
    index &= (L1_SIZE - 1);
244
#endif
245
    lp = &l1_virt_map[index >> L2_BITS];
246
    p = *lp;
247
    if (!p) {
248
        /* allocate if not found */
249
        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
250
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
251
        *lp = p;
252
    }
253
    return p + (index & (L2_SIZE - 1));
254
}
255

    
256
static inline VirtPageDesc *virt_page_find(unsigned int index)
257
{
258
    VirtPageDesc *p;
259

    
260
    p = l1_virt_map[index >> L2_BITS];
261
    if (!p)
262
        return 0;
263
    return p + (index & (L2_SIZE - 1));
264
}
265

    
266
static void virt_page_flush(void)
267
{
268
    int i, j;
269
    VirtPageDesc *p;
270
    
271
    virt_valid_tag++;
272

    
273
    if (virt_valid_tag == 0) {
274
        virt_valid_tag = 1;
275
        for(i = 0; i < L1_SIZE; i++) {
276
            p = l1_virt_map[i];
277
            if (p) {
278
                for(j = 0; j < L2_SIZE; j++)
279
                    p[j].valid_tag = 0;
280
            }
281
        }
282
    }
283
}
284
#else
285
static void virt_page_flush(void)
286
{
287
}
288
#endif
289

    
290
void cpu_exec_init(void)
291
{
292
    if (!code_gen_ptr) {
293
        code_gen_ptr = code_gen_buffer;
294
        page_init();
295
        io_mem_init();
296
    }
297
}
298

    
299
static inline void invalidate_page_bitmap(PageDesc *p)
300
{
301
    if (p->code_bitmap) {
302
        qemu_free(p->code_bitmap);
303
        p->code_bitmap = NULL;
304
    }
305
    p->code_write_count = 0;
306
}
307

    
308
/* set to NULL all the 'first_tb' fields in all PageDescs */
309
static void page_flush_tb(void)
310
{
311
    int i, j;
312
    PageDesc *p;
313

    
314
    for(i = 0; i < L1_SIZE; i++) {
315
        p = l1_map[i];
316
        if (p) {
317
            for(j = 0; j < L2_SIZE; j++) {
318
                p->first_tb = NULL;
319
                invalidate_page_bitmap(p);
320
                p++;
321
            }
322
        }
323
    }
324
}
325

    
326
/* flush all the translation blocks */
327
/* XXX: tb_flush is currently not thread safe */
328
void tb_flush(CPUState *env)
329
{
330
#if defined(DEBUG_FLUSH)
331
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n", 
332
           code_gen_ptr - code_gen_buffer, 
333
           nb_tbs, 
334
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
335
#endif
336
    nb_tbs = 0;
337
    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
338
    virt_page_flush();
339

    
340
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
341
    page_flush_tb();
342

    
343
    code_gen_ptr = code_gen_buffer;
344
    /* XXX: flush processor icache at this point if cache flush is
345
       expensive */
346
    tb_flush_count++;
347
}
348

    
349
#ifdef DEBUG_TB_CHECK
350

    
351
static void tb_invalidate_check(unsigned long address)
352
{
353
    TranslationBlock *tb;
354
    int i;
355
    address &= TARGET_PAGE_MASK;
356
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
357
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
358
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
359
                  address >= tb->pc + tb->size)) {
360
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
361
                       address, tb->pc, tb->size);
362
            }
363
        }
364
    }
365
}
366

    
367
/* verify that all the pages have correct rights for code */
368
static void tb_page_check(void)
369
{
370
    TranslationBlock *tb;
371
    int i, flags1, flags2;
372
    
373
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
374
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
375
            flags1 = page_get_flags(tb->pc);
376
            flags2 = page_get_flags(tb->pc + tb->size - 1);
377
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
378
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
379
                       tb->pc, tb->size, flags1, flags2);
380
            }
381
        }
382
    }
383
}
384

    
385
void tb_jmp_check(TranslationBlock *tb)
386
{
387
    TranslationBlock *tb1;
388
    unsigned int n1;
389

    
390
    /* suppress any remaining jumps to this TB */
391
    tb1 = tb->jmp_first;
392
    for(;;) {
393
        n1 = (long)tb1 & 3;
394
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
395
        if (n1 == 2)
396
            break;
397
        tb1 = tb1->jmp_next[n1];
398
    }
399
    /* check end of list */
400
    if (tb1 != tb) {
401
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
402
    }
403
}
404

    
405
#endif
406

    
407
/* invalidate one TB */
408
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
409
                             int next_offset)
410
{
411
    TranslationBlock *tb1;
412
    for(;;) {
413
        tb1 = *ptb;
414
        if (tb1 == tb) {
415
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
416
            break;
417
        }
418
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
419
    }
420
}
421

    
422
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
423
{
424
    TranslationBlock *tb1;
425
    unsigned int n1;
426

    
427
    for(;;) {
428
        tb1 = *ptb;
429
        n1 = (long)tb1 & 3;
430
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
431
        if (tb1 == tb) {
432
            *ptb = tb1->page_next[n1];
433
            break;
434
        }
435
        ptb = &tb1->page_next[n1];
436
    }
437
}
438

    
439
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
440
{
441
    TranslationBlock *tb1, **ptb;
442
    unsigned int n1;
443

    
444
    ptb = &tb->jmp_next[n];
445
    tb1 = *ptb;
446
    if (tb1) {
447
        /* find tb(n) in circular list */
448
        for(;;) {
449
            tb1 = *ptb;
450
            n1 = (long)tb1 & 3;
451
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
452
            if (n1 == n && tb1 == tb)
453
                break;
454
            if (n1 == 2) {
455
                ptb = &tb1->jmp_first;
456
            } else {
457
                ptb = &tb1->jmp_next[n1];
458
            }
459
        }
460
        /* now we can suppress tb(n) from the list */
461
        *ptb = tb->jmp_next[n];
462

    
463
        tb->jmp_next[n] = NULL;
464
    }
465
}
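/* NOTE: the jump lists use tagged pointers as well: 'jmp_first' heads the
   circular list of TBs that chain to this TB, each entry tagged with the
   jump slot (0 or 1) it uses, and the list is closed by a pointer to the
   TB itself tagged with 2 (see tb_invalidate() below) */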
466

    
467
/* reset the jump entry 'n' of a TB so that it is not chained to
468
   another TB */
469
static inline void tb_reset_jump(TranslationBlock *tb, int n)
470
{
471
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
472
}
473

    
474
static inline void tb_invalidate(TranslationBlock *tb)
475
{
476
    unsigned int h, n1;
477
    TranslationBlock *tb1, *tb2, **ptb;
478
    
479
    tb_invalidated_flag = 1;
480

    
481
    /* remove the TB from the hash list */
482
    h = tb_hash_func(tb->pc);
483
    ptb = &tb_hash[h];
484
    for(;;) {
485
        tb1 = *ptb;
486
        /* NOTE: the TB is not necessarily linked in the hash; if it is
487
           not found there, it simply is not currently in use */
488
        if (tb1 == NULL)
489
            return;
490
        if (tb1 == tb) {
491
            *ptb = tb1->hash_next;
492
            break;
493
        }
494
        ptb = &tb1->hash_next;
495
    }
496

    
497
    /* suppress this TB from the two jump lists */
498
    tb_jmp_remove(tb, 0);
499
    tb_jmp_remove(tb, 1);
500

    
501
    /* suppress any remaining jumps to this TB */
502
    tb1 = tb->jmp_first;
503
    for(;;) {
504
        n1 = (long)tb1 & 3;
505
        if (n1 == 2)
506
            break;
507
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
508
        tb2 = tb1->jmp_next[n1];
509
        tb_reset_jump(tb1, n1);
510
        tb1->jmp_next[n1] = NULL;
511
        tb1 = tb2;
512
    }
513
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
514
}
515

    
516
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
517
{
518
    PageDesc *p;
519
    unsigned int h;
520
    target_ulong phys_pc;
521
    
522
    /* remove the TB from the hash list */
523
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
524
    h = tb_phys_hash_func(phys_pc);
525
    tb_remove(&tb_phys_hash[h], tb, 
526
              offsetof(TranslationBlock, phys_hash_next));
527

    
528
    /* remove the TB from the page list */
529
    if (tb->page_addr[0] != page_addr) {
530
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
531
        tb_page_remove(&p->first_tb, tb);
532
        invalidate_page_bitmap(p);
533
    }
534
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
535
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
536
        tb_page_remove(&p->first_tb, tb);
537
        invalidate_page_bitmap(p);
538
    }
539

    
540
    tb_invalidate(tb);
541
    tb_phys_invalidate_count++;
542
}
543

    
544
static inline void set_bits(uint8_t *tab, int start, int len)
545
{
546
    int end, mask, end1;
547

    
548
    end = start + len;
549
    tab += start >> 3;
550
    mask = 0xff << (start & 7);
551
    if ((start & ~7) == (end & ~7)) {
552
        if (start < end) {
553
            mask &= ~(0xff << (end & 7));
554
            *tab |= mask;
555
        }
556
    } else {
557
        *tab++ |= mask;
558
        start = (start + 8) & ~7;
559
        end1 = end & ~7;
560
        while (start < end1) {
561
            *tab++ = 0xff;
562
            start += 8;
563
        }
564
        if (start < end) {
565
            mask = ~(0xff << (end & 7));
566
            *tab |= mask;
567
        }
568
    }
569
}
570

    
571
static void build_page_bitmap(PageDesc *p)
572
{
573
    int n, tb_start, tb_end;
574
    TranslationBlock *tb;
575
    
576
    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
577
    if (!p->code_bitmap)
578
        return;
579
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
580

    
581
    tb = p->first_tb;
582
    while (tb != NULL) {
583
        n = (long)tb & 3;
584
        tb = (TranslationBlock *)((long)tb & ~3);
585
        /* NOTE: this is subtle as a TB may span two physical pages */
586
        if (n == 0) {
587
            /* NOTE: tb_end may be after the end of the page, but
588
               it is not a problem */
589
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
590
            tb_end = tb_start + tb->size;
591
            if (tb_end > TARGET_PAGE_SIZE)
592
                tb_end = TARGET_PAGE_SIZE;
593
        } else {
594
            tb_start = 0;
595
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
596
        }
597
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
598
        tb = tb->page_next[n];
599
    }
600
}
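/* NOTE: the code bitmap built above has one bit per byte of the page; a
   set bit means the byte is covered by at least one TB.  It is only built
   once a page has seen SMC_BITMAP_USE_THRESHOLD code write accesses (see
   tb_invalidate_phys_page_range()), after which
   tb_invalidate_phys_page_fast() can skip writes that do not touch
   translated code */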
601

    
602
#ifdef TARGET_HAS_PRECISE_SMC
603

    
604
static void tb_gen_code(CPUState *env, 
605
                        target_ulong pc, target_ulong cs_base, int flags,
606
                        int cflags)
607
{
608
    TranslationBlock *tb;
609
    uint8_t *tc_ptr;
610
    target_ulong phys_pc, phys_page2, virt_page2;
611
    int code_gen_size;
612

    
613
    phys_pc = get_phys_addr_code(env, pc);
614
    tb = tb_alloc(pc);
615
    if (!tb) {
616
        /* flush must be done */
617
        tb_flush(env);
618
        /* cannot fail at this point */
619
        tb = tb_alloc(pc);
620
    }
621
    tc_ptr = code_gen_ptr;
622
    tb->tc_ptr = tc_ptr;
623
    tb->cs_base = cs_base;
624
    tb->flags = flags;
625
    tb->cflags = cflags;
626
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
627
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
628
    
629
    /* check next page if needed */
630
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
631
    phys_page2 = -1;
632
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
633
        phys_page2 = get_phys_addr_code(env, virt_page2);
634
    }
635
    tb_link_phys(tb, phys_pc, phys_page2);
636
}
637
#endif
638
    
639
/* invalidate all TBs which intersect with the target physical page
640
   starting in range [start;end[. NOTE: start and end must refer to
641
   the same physical page. 'is_cpu_write_access' should be true if called
642
   from a real cpu write access: the virtual CPU will exit the current
643
   TB if code is modified inside this TB. */
644
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end, 
645
                                   int is_cpu_write_access)
646
{
647
    int n, current_tb_modified, current_tb_not_found, current_flags;
648
    CPUState *env = cpu_single_env;
649
    PageDesc *p;
650
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
651
    target_ulong tb_start, tb_end;
652
    target_ulong current_pc, current_cs_base;
653

    
654
    p = page_find(start >> TARGET_PAGE_BITS);
655
    if (!p) 
656
        return;
657
    if (!p->code_bitmap && 
658
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
659
        is_cpu_write_access) {
660
        /* build code bitmap */
661
        build_page_bitmap(p);
662
    }
663

    
664
    /* we remove all the TBs in the range [start, end[ */
665
    /* XXX: see if in some cases it could be faster to invalidate all the code */
666
    current_tb_not_found = is_cpu_write_access;
667
    current_tb_modified = 0;
668
    current_tb = NULL; /* avoid warning */
669
    current_pc = 0; /* avoid warning */
670
    current_cs_base = 0; /* avoid warning */
671
    current_flags = 0; /* avoid warning */
672
    tb = p->first_tb;
673
    while (tb != NULL) {
674
        n = (long)tb & 3;
675
        tb = (TranslationBlock *)((long)tb & ~3);
676
        tb_next = tb->page_next[n];
677
        /* NOTE: this is subtle as a TB may span two physical pages */
678
        if (n == 0) {
679
            /* NOTE: tb_end may be after the end of the page, but
680
               it is not a problem */
681
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
682
            tb_end = tb_start + tb->size;
683
        } else {
684
            tb_start = tb->page_addr[1];
685
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
686
        }
687
        if (!(tb_end <= start || tb_start >= end)) {
688
#ifdef TARGET_HAS_PRECISE_SMC
689
            if (current_tb_not_found) {
690
                current_tb_not_found = 0;
691
                current_tb = NULL;
692
                if (env->mem_write_pc) {
693
                    /* now we have a real cpu fault */
694
                    current_tb = tb_find_pc(env->mem_write_pc);
695
                }
696
            }
697
            if (current_tb == tb &&
698
                !(current_tb->cflags & CF_SINGLE_INSN)) {
699
                /* If we are modifying the current TB, we must stop
700
                its execution. We could be more precise by checking
701
                that the modification is after the current PC, but it
702
                would require a specialized function to partially
703
                restore the CPU state */
704
                
705
                current_tb_modified = 1;
706
                cpu_restore_state(current_tb, env, 
707
                                  env->mem_write_pc, NULL);
708
#if defined(TARGET_I386)
709
                current_flags = env->hflags;
710
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
711
                current_cs_base = (target_ulong)env->segs[R_CS].base;
712
                current_pc = current_cs_base + env->eip;
713
#else
714
#error unsupported CPU
715
#endif
716
            }
717
#endif /* TARGET_HAS_PRECISE_SMC */
718
            saved_tb = env->current_tb;
719
            env->current_tb = NULL;
720
            tb_phys_invalidate(tb, -1);
721
            env->current_tb = saved_tb;
722
            if (env->interrupt_request && env->current_tb)
723
                cpu_interrupt(env, env->interrupt_request);
724
        }
725
        tb = tb_next;
726
    }
727
#if !defined(CONFIG_USER_ONLY)
728
    /* if no code remaining, no need to continue to use slow writes */
729
    if (!p->first_tb) {
730
        invalidate_page_bitmap(p);
731
        if (is_cpu_write_access) {
732
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
733
        }
734
    }
735
#endif
736
#ifdef TARGET_HAS_PRECISE_SMC
737
    if (current_tb_modified) {
738
        /* we generate a block containing just the instruction
739
           modifying the memory. It will ensure that it cannot modify
740
           itself */
741
        env->current_tb = NULL;
742
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 
743
                    CF_SINGLE_INSN);
744
        cpu_resume_from_signal(env, NULL);
745
    }
746
#endif
747
}
748

    
749
/* len must be <= 8 and start must be a multiple of len */
750
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
751
{
752
    PageDesc *p;
753
    int offset, b;
754
#if 0
755
    if (1) {
756
        if (loglevel) {
757
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n", 
758
                   cpu_single_env->mem_write_vaddr, len, 
759
                   cpu_single_env->eip, 
760
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
761
        }
762
    }
763
#endif
764
    p = page_find(start >> TARGET_PAGE_BITS);
765
    if (!p) 
766
        return;
767
    if (p->code_bitmap) {
768
        offset = start & ~TARGET_PAGE_MASK;
769
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
770
        if (b & ((1 << len) - 1))
771
            goto do_invalidate;
772
    } else {
773
    do_invalidate:
774
        tb_invalidate_phys_page_range(start, start + len, 1);
775
    }
776
}
777

    
778
#if !defined(CONFIG_SOFTMMU)
779
static void tb_invalidate_phys_page(target_ulong addr, 
780
                                    unsigned long pc, void *puc)
781
{
782
    int n, current_flags, current_tb_modified;
783
    target_ulong current_pc, current_cs_base;
784
    PageDesc *p;
785
    TranslationBlock *tb, *current_tb;
786
#ifdef TARGET_HAS_PRECISE_SMC
787
    CPUState *env = cpu_single_env;
788
#endif
789

    
790
    addr &= TARGET_PAGE_MASK;
791
    p = page_find(addr >> TARGET_PAGE_BITS);
792
    if (!p) 
793
        return;
794
    tb = p->first_tb;
795
    current_tb_modified = 0;
796
    current_tb = NULL;
797
    current_pc = 0; /* avoid warning */
798
    current_cs_base = 0; /* avoid warning */
799
    current_flags = 0; /* avoid warning */
800
#ifdef TARGET_HAS_PRECISE_SMC
801
    if (tb && pc != 0) {
802
        current_tb = tb_find_pc(pc);
803
    }
804
#endif
805
    while (tb != NULL) {
806
        n = (long)tb & 3;
807
        tb = (TranslationBlock *)((long)tb & ~3);
808
#ifdef TARGET_HAS_PRECISE_SMC
809
        if (current_tb == tb &&
810
            !(current_tb->cflags & CF_SINGLE_INSN)) {
811
                /* If we are modifying the current TB, we must stop
812
                   its execution. We could be more precise by checking
813
                   that the modification is after the current PC, but it
814
                   would require a specialized function to partially
815
                   restore the CPU state */
816
            
817
            current_tb_modified = 1;
818
            cpu_restore_state(current_tb, env, pc, puc);
819
#if defined(TARGET_I386)
820
            current_flags = env->hflags;
821
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
822
            current_cs_base = (target_ulong)env->segs[R_CS].base;
823
            current_pc = current_cs_base + env->eip;
824
#else
825
#error unsupported CPU
826
#endif
827
        }
828
#endif /* TARGET_HAS_PRECISE_SMC */
829
        tb_phys_invalidate(tb, addr);
830
        tb = tb->page_next[n];
831
    }
832
    p->first_tb = NULL;
833
#ifdef TARGET_HAS_PRECISE_SMC
834
    if (current_tb_modified) {
835
        /* we generate a block containing just the instruction
836
           modifying the memory. It will ensure that it cannot modify
837
           itself */
838
        env->current_tb = NULL;
839
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 
840
                    CF_SINGLE_INSN);
841
        cpu_resume_from_signal(env, puc);
842
    }
843
#endif
844
}
845
#endif
846

    
847
/* add the tb in the target page and protect it if necessary */
848
static inline void tb_alloc_page(TranslationBlock *tb, 
849
                                 unsigned int n, unsigned int page_addr)
850
{
851
    PageDesc *p;
852
    TranslationBlock *last_first_tb;
853

    
854
    tb->page_addr[n] = page_addr;
855
    p = page_find(page_addr >> TARGET_PAGE_BITS);
856
    tb->page_next[n] = p->first_tb;
857
    last_first_tb = p->first_tb;
858
    p->first_tb = (TranslationBlock *)((long)tb | n);
859
    invalidate_page_bitmap(p);
860

    
861
#if defined(TARGET_HAS_SMC) || 1
862

    
863
#if defined(CONFIG_USER_ONLY)
864
    if (p->flags & PAGE_WRITE) {
865
        unsigned long host_start, host_end, addr;
866
        int prot;
867

    
868
        /* force the host page as non writable (writes will have a
869
           page fault + mprotect overhead) */
870
        host_start = page_addr & qemu_host_page_mask;
871
        host_end = host_start + qemu_host_page_size;
872
        prot = 0;
873
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
874
            prot |= page_get_flags(addr);
875
        mprotect((void *)host_start, qemu_host_page_size, 
876
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
877
#ifdef DEBUG_TB_INVALIDATE
878
        printf("protecting code page: 0x%08lx\n", 
879
               host_start);
880
#endif
881
        p->flags &= ~PAGE_WRITE;
882
    }
883
#else
884
    /* if some code is already present, then the pages are already
885
       protected. So we handle the case where only the first TB is
886
       allocated in a physical page */
887
    if (!last_first_tb) {
888
        target_ulong virt_addr;
889

    
890
        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
891
        tlb_protect_code(cpu_single_env, virt_addr);        
892
    }
893
#endif
894

    
895
#endif /* TARGET_HAS_SMC */
896
}
897

    
898
/* Allocate a new translation block. Flush the translation buffer if
899
   too many translation blocks or too much generated code. */
900
TranslationBlock *tb_alloc(target_ulong pc)
901
{
902
    TranslationBlock *tb;
903

    
904
    if (nb_tbs >= CODE_GEN_MAX_BLOCKS || 
905
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
906
        return NULL;
907
    tb = &tbs[nb_tbs++];
908
    tb->pc = pc;
909
    tb->cflags = 0;
910
    return tb;
911
}
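/* NOTE: a NULL return means the TB array or the code buffer is full; the
   caller is expected to call tb_flush() and retry, as tb_gen_code() does */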
912

    
913
/* add a new TB and link it to the physical page tables. phys_page2 is
914
   (-1) to indicate that only one page contains the TB. */
915
void tb_link_phys(TranslationBlock *tb, 
916
                  target_ulong phys_pc, target_ulong phys_page2)
917
{
918
    unsigned int h;
919
    TranslationBlock **ptb;
920

    
921
    /* add in the physical hash table */
922
    h = tb_phys_hash_func(phys_pc);
923
    ptb = &tb_phys_hash[h];
924
    tb->phys_hash_next = *ptb;
925
    *ptb = tb;
926

    
927
    /* add in the page list */
928
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
929
    if (phys_page2 != -1)
930
        tb_alloc_page(tb, 1, phys_page2);
931
    else
932
        tb->page_addr[1] = -1;
933
#ifdef DEBUG_TB_CHECK
934
    tb_page_check();
935
#endif
936
}
937

    
938
/* link the tb with the other TBs */
939
void tb_link(TranslationBlock *tb)
940
{
941
#if !defined(CONFIG_USER_ONLY)
942
    {
943
        VirtPageDesc *vp;
944
        target_ulong addr;
945
        
946
        /* save the code memory mappings (needed to invalidate the code) */
947
        addr = tb->pc & TARGET_PAGE_MASK;
948
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
949
#ifdef DEBUG_TLB_CHECK 
950
        if (vp->valid_tag == virt_valid_tag &&
951
            vp->phys_addr != tb->page_addr[0]) {
952
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
953
                   addr, tb->page_addr[0], vp->phys_addr);
954
        }
955
#endif
956
        vp->phys_addr = tb->page_addr[0];
957
        if (vp->valid_tag != virt_valid_tag) {
958
            vp->valid_tag = virt_valid_tag;
959
#if !defined(CONFIG_SOFTMMU)
960
            vp->prot = 0;
961
#endif
962
        }
963
        
964
        if (tb->page_addr[1] != -1) {
965
            addr += TARGET_PAGE_SIZE;
966
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
967
#ifdef DEBUG_TLB_CHECK 
968
            if (vp->valid_tag == virt_valid_tag &&
969
                vp->phys_addr != tb->page_addr[1]) { 
970
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
971
                       addr, tb->page_addr[1], vp->phys_addr);
972
            }
973
#endif
974
            vp->phys_addr = tb->page_addr[1];
975
            if (vp->valid_tag != virt_valid_tag) {
976
                vp->valid_tag = virt_valid_tag;
977
#if !defined(CONFIG_SOFTMMU)
978
                vp->prot = 0;
979
#endif
980
            }
981
        }
982
    }
983
#endif
984

    
985
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
986
    tb->jmp_next[0] = NULL;
987
    tb->jmp_next[1] = NULL;
988
#ifdef USE_CODE_COPY
989
    tb->cflags &= ~CF_FP_USED;
990
    if (tb->cflags & CF_TB_FP_USED)
991
        tb->cflags |= CF_FP_USED;
992
#endif
993

    
994
    /* init original jump addresses */
995
    if (tb->tb_next_offset[0] != 0xffff)
996
        tb_reset_jump(tb, 0);
997
    if (tb->tb_next_offset[1] != 0xffff)
998
        tb_reset_jump(tb, 1);
999
}
1000

    
1001
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1002
   tb[1].tc_ptr. Return NULL if not found */
1003
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1004
{
1005
    int m_min, m_max, m;
1006
    unsigned long v;
1007
    TranslationBlock *tb;
1008

    
1009
    if (nb_tbs <= 0)
1010
        return NULL;
1011
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1012
        tc_ptr >= (unsigned long)code_gen_ptr)
1013
        return NULL;
1014
    /* binary search (cf Knuth) */
1015
    m_min = 0;
1016
    m_max = nb_tbs - 1;
1017
    while (m_min <= m_max) {
1018
        m = (m_min + m_max) >> 1;
1019
        tb = &tbs[m];
1020
        v = (unsigned long)tb->tc_ptr;
1021
        if (v == tc_ptr)
1022
            return tb;
1023
        else if (tc_ptr < v) {
1024
            m_max = m - 1;
1025
        } else {
1026
            m_min = m + 1;
1027
        }
1028
    } 
1029
    return &tbs[m_max];
1030
}
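#if 0
/* illustrative sketch (not compiled): map a host PC taken from a memory
   write fault back to the TB that generated the code, then roll the CPU
   state back to the corresponding target instruction */
{
    TranslationBlock *tb;

    tb = tb_find_pc(env->mem_write_pc);
    if (tb)
        cpu_restore_state(tb, env, env->mem_write_pc, NULL);
}
#endif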
1031

    
1032
static void tb_reset_jump_recursive(TranslationBlock *tb);
1033

    
1034
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1035
{
1036
    TranslationBlock *tb1, *tb_next, **ptb;
1037
    unsigned int n1;
1038

    
1039
    tb1 = tb->jmp_next[n];
1040
    if (tb1 != NULL) {
1041
        /* find head of list */
1042
        for(;;) {
1043
            n1 = (long)tb1 & 3;
1044
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1045
            if (n1 == 2)
1046
                break;
1047
            tb1 = tb1->jmp_next[n1];
1048
        }
1049
        /* we are now sure that tb jumps to tb1 */
1050
        tb_next = tb1;
1051

    
1052
        /* remove tb from the jmp_first list */
1053
        ptb = &tb_next->jmp_first;
1054
        for(;;) {
1055
            tb1 = *ptb;
1056
            n1 = (long)tb1 & 3;
1057
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1058
            if (n1 == n && tb1 == tb)
1059
                break;
1060
            ptb = &tb1->jmp_next[n1];
1061
        }
1062
        *ptb = tb->jmp_next[n];
1063
        tb->jmp_next[n] = NULL;
1064
        
1065
        /* suppress the jump to next tb in generated code */
1066
        tb_reset_jump(tb, n);
1067

    
1068
        /* suppress jumps in the tb on which we could have jumped */
1069
        tb_reset_jump_recursive(tb_next);
1070
    }
1071
}
1072

    
1073
static void tb_reset_jump_recursive(TranslationBlock *tb)
1074
{
1075
    tb_reset_jump_recursive2(tb, 0);
1076
    tb_reset_jump_recursive2(tb, 1);
1077
}
1078

    
1079
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
1080
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1081
{
1082
    target_ulong phys_addr;
1083

    
1084
    phys_addr = cpu_get_phys_page_debug(env, pc);
1085
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
1086
}
1087
#endif
1088

    
1089
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1090
   breakpoint is reached */
1091
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1092
{
1093
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
1094
    int i;
1095
    
1096
    for(i = 0; i < env->nb_breakpoints; i++) {
1097
        if (env->breakpoints[i] == pc)
1098
            return 0;
1099
    }
1100

    
1101
    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1102
        return -1;
1103
    env->breakpoints[env->nb_breakpoints++] = pc;
1104
    
1105
    breakpoint_invalidate(env, pc);
1106
    return 0;
1107
#else
1108
    return -1;
1109
#endif
1110
}
1111

    
1112
/* remove a breakpoint */
1113
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1114
{
1115
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
1116
    int i;
1117
    for(i = 0; i < env->nb_breakpoints; i++) {
1118
        if (env->breakpoints[i] == pc)
1119
            goto found;
1120
    }
1121
    return -1;
1122
 found:
1123
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
1124
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
1125
    env->nb_breakpoints--;
1126

    
1127
    breakpoint_invalidate(env, pc);
1128
    return 0;
1129
#else
1130
    return -1;
1131
#endif
1132
}
1133

    
1134
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1135
   CPU loop after each instruction */
1136
void cpu_single_step(CPUState *env, int enabled)
1137
{
1138
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
1139
    if (env->singlestep_enabled != enabled) {
1140
        env->singlestep_enabled = enabled;
1141
        /* must flush all the translated code to avoid inconsistencies */
1142
        /* XXX: only flush what is necessary */
1143
        tb_flush(env);
1144
    }
1145
#endif
1146
}
1147

    
1148
/* enable or disable low level logging */
1149
void cpu_set_log(int log_flags)
1150
{
1151
    loglevel = log_flags;
1152
    if (loglevel && !logfile) {
1153
        logfile = fopen(logfilename, "w");
1154
        if (!logfile) {
1155
            perror(logfilename);
1156
            _exit(1);
1157
        }
1158
#if !defined(CONFIG_SOFTMMU)
1159
        /* must avoid glibc's use of mmap() by setting the buffer "by hand" */
1160
        {
1161
            static uint8_t logfile_buf[4096];
1162
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1163
        }
1164
#else
1165
        setvbuf(logfile, NULL, _IOLBF, 0);
1166
#endif
1167
    }
1168
}
1169

    
1170
void cpu_set_log_filename(const char *filename)
1171
{
1172
    logfilename = strdup(filename);
1173
}
1174

    
1175
/* mask must never be zero, except for A20 change call */
1176
void cpu_interrupt(CPUState *env, int mask)
1177
{
1178
    TranslationBlock *tb;
1179
    static int interrupt_lock;
1180

    
1181
    env->interrupt_request |= mask;
1182
    /* if the cpu is currently executing code, we must unlink it and
1183
       all the potentially executing TB */
1184
    tb = env->current_tb;
1185
    if (tb && !testandset(&interrupt_lock)) {
1186
        env->current_tb = NULL;
1187
        tb_reset_jump_recursive(tb);
1188
        interrupt_lock = 0;
1189
    }
1190
}
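/* NOTE: unlinking the current TB (tb_reset_jump_recursive() above) breaks
   the direct jumps between chained TBs, so the execution loop regains
   control at the next TB boundary and can then notice interrupt_request */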
1191

    
1192
void cpu_reset_interrupt(CPUState *env, int mask)
1193
{
1194
    env->interrupt_request &= ~mask;
1195
}
1196

    
1197
CPULogItem cpu_log_items[] = {
1198
    { CPU_LOG_TB_OUT_ASM, "out_asm", 
1199
      "show generated host assembly code for each compiled TB" },
1200
    { CPU_LOG_TB_IN_ASM, "in_asm",
1201
      "show target assembly code for each compiled TB" },
1202
    { CPU_LOG_TB_OP, "op", 
1203
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1204
#ifdef TARGET_I386
1205
    { CPU_LOG_TB_OP_OPT, "op_opt",
1206
      "show micro ops after optimization for each compiled TB" },
1207
#endif
1208
    { CPU_LOG_INT, "int",
1209
      "show interrupts/exceptions in short format" },
1210
    { CPU_LOG_EXEC, "exec",
1211
      "show trace before each executed TB (lots of logs)" },
1212
    { CPU_LOG_TB_CPU, "cpu",
1213
      "show CPU state before bloc translation" },
1214
#ifdef TARGET_I386
1215
    { CPU_LOG_PCALL, "pcall",
1216
      "show protected mode far calls/returns/exceptions" },
1217
#endif
1218
#ifdef DEBUG_IOPORT
1219
    { CPU_LOG_IOPORT, "ioport",
1220
      "show all i/o ports accesses" },
1221
#endif
1222
    { 0, NULL, NULL },
1223
};
1224

    
1225
static int cmp1(const char *s1, int n, const char *s2)
1226
{
1227
    if (strlen(s2) != n)
1228
        return 0;
1229
    return memcmp(s1, s2, n) == 0;
1230
}
1231
      
1232
/* takes a comma separated list of log masks. Return 0 if error. */
1233
int cpu_str_to_log_mask(const char *str)
1234
{
1235
    CPULogItem *item;
1236
    int mask;
1237
    const char *p, *p1;
1238

    
1239
    p = str;
1240
    mask = 0;
1241
    for(;;) {
1242
        p1 = strchr(p, ',');
1243
        if (!p1)
1244
            p1 = p + strlen(p);
1245
        if(cmp1(p,p1-p,"all")) {
1246
                for(item = cpu_log_items; item->mask != 0; item++) {
1247
                        mask |= item->mask;
1248
                }
1249
        } else {
1250
        for(item = cpu_log_items; item->mask != 0; item++) {
1251
            if (cmp1(p, p1 - p, item->name))
1252
                goto found;
1253
        }
1254
        return 0;
1255
        }
1256
    found:
1257
        mask |= item->mask;
1258
        if (*p1 != ',')
1259
            break;
1260
        p = p1 + 1;
1261
    }
1262
    return mask;
1263
}
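#if 0
/* illustrative sketch (not compiled): turn a '-d' style option string
   into a log mask and enable logging with it */
{
    int mask;

    mask = cpu_str_to_log_mask("in_asm,op,exec");
    if (mask)
        cpu_set_log(mask);
}
#endif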
1264

    
1265
void cpu_abort(CPUState *env, const char *fmt, ...)
1266
{
1267
    va_list ap;
1268

    
1269
    va_start(ap, fmt);
1270
    fprintf(stderr, "qemu: fatal: ");
1271
    vfprintf(stderr, fmt, ap);
1272
    fprintf(stderr, "\n");
1273
#ifdef TARGET_I386
1274
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1275
#else
1276
    cpu_dump_state(env, stderr, fprintf, 0);
1277
#endif
1278
    va_end(ap);
1279
    abort();
1280
}
1281

    
1282
#if !defined(CONFIG_USER_ONLY)
1283

    
1284
/* NOTE: if flush_global is true, also flush global entries (not
1285
   implemented yet) */
1286
void tlb_flush(CPUState *env, int flush_global)
1287
{
1288
    int i;
1289

    
1290
#if defined(DEBUG_TLB)
1291
    printf("tlb_flush:\n");
1292
#endif
1293
    /* must reset current TB so that interrupts cannot modify the
1294
       links while we are modifying them */
1295
    env->current_tb = NULL;
1296

    
1297
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1298
        env->tlb_read[0][i].address = -1;
1299
        env->tlb_write[0][i].address = -1;
1300
        env->tlb_read[1][i].address = -1;
1301
        env->tlb_write[1][i].address = -1;
1302
    }
1303

    
1304
    virt_page_flush();
1305
    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
1306

    
1307
#if !defined(CONFIG_SOFTMMU)
1308
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1309
#endif
1310
#ifdef USE_KQEMU
1311
    if (env->kqemu_enabled) {
1312
        kqemu_flush(env, flush_global);
1313
    }
1314
#endif
1315
    tlb_flush_count++;
1316
}
1317

    
1318
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1319
{
1320
    if (addr == (tlb_entry->address & 
1321
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
1322
        tlb_entry->address = -1;
1323
}
1324

    
1325
void tlb_flush_page(CPUState *env, target_ulong addr)
1326
{
1327
    int i, n;
1328
    VirtPageDesc *vp;
1329
    PageDesc *p;
1330
    TranslationBlock *tb;
1331

    
1332
#if defined(DEBUG_TLB)
1333
    printf("tlb_flush_page: 0x%08x\n", addr);
1334
#endif
1335
    /* must reset current TB so that interrupts cannot modify the
1336
       links while we are modifying them */
1337
    env->current_tb = NULL;
1338

    
1339
    addr &= TARGET_PAGE_MASK;
1340
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1341
    tlb_flush_entry(&env->tlb_read[0][i], addr);
1342
    tlb_flush_entry(&env->tlb_write[0][i], addr);
1343
    tlb_flush_entry(&env->tlb_read[1][i], addr);
1344
    tlb_flush_entry(&env->tlb_write[1][i], addr);
1345

    
1346
    /* remove from the virtual pc hash table all the TB at this
1347
       virtual address */
1348
    
1349
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1350
    if (vp && vp->valid_tag == virt_valid_tag) {
1351
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
1352
        if (p) {
1353
            /* we remove all the links to the TBs in this virtual page */
1354
            tb = p->first_tb;
1355
            while (tb != NULL) {
1356
                n = (long)tb & 3;
1357
                tb = (TranslationBlock *)((long)tb & ~3);
1358
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
1359
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
1360
                    tb_invalidate(tb);
1361
                }
1362
                tb = tb->page_next[n];
1363
            }
1364
        }
1365
        vp->valid_tag = 0;
1366
    }
1367

    
1368
#if !defined(CONFIG_SOFTMMU)
1369
    if (addr < MMAP_AREA_END)
1370
        munmap((void *)addr, TARGET_PAGE_SIZE);
1371
#endif
1372
#ifdef USE_KQEMU
1373
    if (env->kqemu_enabled) {
1374
        kqemu_flush_page(env, addr);
1375
    }
1376
#endif
1377
}
1378

    
1379
static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
1380
{
1381
    if (addr == (tlb_entry->address & 
1382
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
1383
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
1384
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
1385
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
1386
    }
1387
}
1388

    
1389
/* update the TLBs so that writes to code in the virtual page 'addr'
1390
   can be detected */
1391
static void tlb_protect_code(CPUState *env, target_ulong addr)
1392
{
1393
    int i;
1394

    
1395
    addr &= TARGET_PAGE_MASK;
1396
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1397
    tlb_protect_code1(&env->tlb_write[0][i], addr);
1398
    tlb_protect_code1(&env->tlb_write[1][i], addr);
1399
#if !defined(CONFIG_SOFTMMU)
1400
    /* NOTE: as we generated the code for this page, it is already at
1401
       least readable */
1402
    if (addr < MMAP_AREA_END)
1403
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
1404
#endif
1405
}
1406

    
1407
static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry, 
1408
                                       unsigned long phys_addr)
1409
{
1410
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
1411
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
1412
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1413
    }
1414
}
1415

    
1416
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1417
   tested for self modifying code */
1418
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
1419
{
1420
    int i;
1421

    
1422
    phys_addr &= TARGET_PAGE_MASK;
1423
    phys_addr += (long)phys_ram_base;
1424
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1425
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
1426
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
1427
}
1428

    
1429
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, 
1430
                                         unsigned long start, unsigned long length)
1431
{
1432
    unsigned long addr;
1433
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1434
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1435
        if ((addr - start) < length) {
1436
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1437
        }
1438
    }
1439
}
1440

    
1441
void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end,
1442
                                     int dirty_flags)
1443
{
1444
    CPUState *env;
1445
    unsigned long length, start1;
1446
    int i, mask, len;
1447
    uint8_t *p;
1448

    
1449
    start &= TARGET_PAGE_MASK;
1450
    end = TARGET_PAGE_ALIGN(end);
1451

    
1452
    length = end - start;
1453
    if (length == 0)
1454
        return;
1455
    mask = ~dirty_flags;
1456
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1457
    len = length >> TARGET_PAGE_BITS;
1458
    for(i = 0; i < len; i++)
1459
        p[i] &= mask;
1460

    
1461
    env = cpu_single_env;
1462
    /* we modify the TLB cache so that the dirty bit will be set again
1463
       when accessing the range */
1464
    start1 = start + (unsigned long)phys_ram_base;
1465
    for(i = 0; i < CPU_TLB_SIZE; i++)
1466
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
1467
    for(i = 0; i < CPU_TLB_SIZE; i++)
1468
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
1469

    
1470
#if !defined(CONFIG_SOFTMMU)
1471
    /* XXX: this is expensive */
1472
    {
1473
        VirtPageDesc *p;
1474
        int j;
1475
        target_ulong addr;
1476

    
1477
        for(i = 0; i < L1_SIZE; i++) {
1478
            p = l1_virt_map[i];
1479
            if (p) {
1480
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
1481
                for(j = 0; j < L2_SIZE; j++) {
1482
                    if (p->valid_tag == virt_valid_tag &&
1483
                        p->phys_addr >= start && p->phys_addr < end &&
1484
                        (p->prot & PROT_WRITE)) {
1485
                        if (addr < MMAP_AREA_END) {
1486
                            mprotect((void *)addr, TARGET_PAGE_SIZE, 
1487
                                     p->prot & ~PROT_WRITE);
1488
                        }
1489
                    }
1490
                    addr += TARGET_PAGE_SIZE;
1491
                    p++;
1492
                }
1493
            }
1494
        }
1495
    }
1496
#endif
1497
}
1498

    
1499
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, 
1500
                                    unsigned long start)
1501
{
1502
    unsigned long addr;
1503
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1504
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1505
        if (addr == start) {
1506
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
1507
        }
1508
    }
1509
}
1510

    
1511
/* update the TLB corresponding to virtual page vaddr and phys addr
1512
   addr so that it is no longer dirty */
1513
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
1514
{
1515
    CPUState *env = cpu_single_env;
1516
    int i;
1517

    
1518
    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 0xff;
1519

    
1520
    addr &= TARGET_PAGE_MASK;
1521
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1522
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
1523
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
1524
}
1525

    
1526
/* add a new TLB entry. At most one entry for a given virtual address
1527
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1528
   (can only happen in non SOFTMMU mode for I/O pages or pages
1529
   conflicting with the host address space). */
1530
int tlb_set_page(CPUState *env, target_ulong vaddr, 
1531
                 target_phys_addr_t paddr, int prot, 
1532
                 int is_user, int is_softmmu)
1533
{
1534
    PhysPageDesc *p;
1535
    unsigned long pd;
1536
    TranslationBlock *first_tb;
1537
    unsigned int index;
1538
    target_ulong address;
1539
    unsigned long addend;
1540
    int ret;
1541

    
1542
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1543
    first_tb = NULL;
1544
    if (!p) {
1545
        pd = IO_MEM_UNASSIGNED;
1546
    } else {
1547
        PageDesc *p1;
1548
        pd = p->phys_offset;
1549
        if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1550
            /* NOTE: we also allocate the page at this stage */
1551
            p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
1552
            first_tb = p1->first_tb;
1553
        }
1554
    }
1555
#if defined(DEBUG_TLB)
1556
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
1557
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
1558
#endif
1559

    
1560
    ret = 0;
1561
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else
                /* XXX: the PowerPC code seems not ready to handle
                   self modifying code with DCBI */
#if defined(TARGET_HAS_SMC) || 1
            if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend;
            } else
#endif
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
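
/* Note (added for illustration, not part of the original code): the
   address/addend pair stored above is what the generated fast path uses to
   turn a guest virtual address into a host pointer.  The sketch below is a
   simplified, hypothetical version of that fast path (the real one is
   generated from softmmu_template.h); the helper name is made up. */
#if 0
static uint32_t example_ldl(CPUState *env, target_ulong vaddr, int is_user)
{
    int index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    if (env->tlb_read[is_user][index].address == (vaddr & TARGET_PAGE_MASK)) {
        /* RAM hit: the addend converts the guest virtual address into a
           host pointer inside phys_ram_base */
        return ldl_p((uint8_t *)(long)(vaddr + env->tlb_read[is_user][index].addend));
    }
    /* miss, or an I/O/ROM/CODE page (its 'address' has io bits set and
       never matches): take the slow path */
    return 0; /* placeholder for the slow path */
}
#endif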
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if a write-protected page becomes writable again, invalidate
           the code inside it */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
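
/* Example (added for illustration, not part of the original code): a
   user-mode mmap()/mprotect() emulation would record the new protection
   roughly as below; the function name and flag choice are hypothetical. */
#if 0
void example_record_mapping(unsigned long start, unsigned long len, int writable)
{
    int flags = PAGE_VALID | PAGE_READ | PAGE_EXEC;
    if (writable)
        flags |= PAGE_WRITE; /* PAGE_WRITE_ORG is added by page_set_flags() */
    page_set_flags(start, start + len, flags);
}
#endif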
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}
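
/* Example (added for illustration, not part of the original code): in
   user-mode emulation a host system call that writes straight into guest
   memory should have the range unprotected first, otherwise the host write
   would fault on a page that is write-protected for self-modifying-code
   detection.  The wrapper below is hypothetical. */
#if 0
long example_do_read(int fd, uint8_t *guest_buf, unsigned long count)
{
    page_unprotect_range(guest_buf, count);
    return read(fd, guest_buf, count);
}
#endif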
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    unsigned long addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
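
/* Example (added for illustration, not part of the original code): typical
   registrations done by board setup code.  The addresses, sizes and the
   bios_offset variable below are hypothetical. */
#if 0
void example_register_memory(unsigned long ram_size, unsigned long bios_offset)
{
    /* plain RAM starting at physical address 0: phys_offset low bits are 0 */
    cpu_register_physical_memory(0x00000000, ram_size, 0);
    /* ROM image already copied at phys_ram_base + bios_offset: reads go
       straight to RAM, writes are ignored */
    cpu_register_physical_memory(0x000f0000, 0x10000, bios_offset | IO_MEM_ROM);
}
#endif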
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

/* self modifying code support in soft mmu mode: writing to a page
   containing code comes to these functions */

static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 1);
#endif
    stb_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 0xff;
}

static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2);
#endif
    stw_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 0xff;
}

static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4);
#endif
    stl_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 0xff;
}

static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stb_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stw_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stl_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        /* allocate a new io zone, provided the table is not full */
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
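
/* Example (added for illustration, not part of the original code): a
   device model with a single 32-bit MMIO register.  The device name,
   register value and physical address below are hypothetical. */
#if 0
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0x12345678; /* hypothetical register contents */
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* react to the guest write here */
}

static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readl, mydev_readl, mydev_readl,
};

static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writel, mydev_writel, mydev_writel,
};

static void example_mydev_init(void)
{
    /* io_index 0 asks for a new io zone; the returned value is then used
       as the phys_offset of the mapped page */
    int io = cpu_register_io_memory(0, mydev_read, mydev_write, NULL);
    cpu_register_physical_memory(0xfc000000, 0x1000, io);
}
#endif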
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* never used */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    return 0;
}

void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 0xff;
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != 0) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
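
/* Example (added for illustration, not part of the original code): a target
   MMU helper could set a status bit in a guest page table entry without
   marking the RAM page dirty.  The function name and flag value below are
   hypothetical. */
#if 0
void example_pte_set_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    pte |= 0x20; /* hypothetical "accessed" bit */
    stl_phys_notdirty(pte_addr, pte);
}
#endif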
/* warning: addr must be aligned */
/* XXX: optimize code invalidation test */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != 0) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        /* invalidate code */
        tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
        /* set dirty bit */
        phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 0xff;
    }
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
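
/* Example (added for illustration, not part of the original code): how a
   debugger stub might read one 32-bit word of guest virtual memory.  The
   helper name is hypothetical. */
#if 0
int example_read_guest_word(CPUState *env, target_ulong vaddr, uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, 4, 0) < 0)
        return -1;
    *out = ldl_p(buf);
    return 0;
}
#endif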
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count            %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif