root / exec.c @ 8df1cd07
1
/*
2
 *  virtual page mapping and translated block handling
3
 * 
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include "config.h"
21
#ifdef _WIN32
22
#include <windows.h>
23
#else
24
#include <sys/types.h>
25
#include <sys/mman.h>
26
#endif
27
#include <stdlib.h>
28
#include <stdio.h>
29
#include <stdarg.h>
30
#include <string.h>
31
#include <errno.h>
32
#include <unistd.h>
33
#include <inttypes.h>
34

    
35
#include "cpu.h"
36
#include "exec-all.h"
37

    
38
//#define DEBUG_TB_INVALIDATE
39
//#define DEBUG_FLUSH
40
//#define DEBUG_TLB
41

    
42
/* make various TB consistency checks */
43
//#define DEBUG_TB_CHECK 
44
//#define DEBUG_TLB_CHECK 
45

    
46
/* threshold to flush the translated code buffer */
47
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
48

    
49
#define SMC_BITMAP_USE_THRESHOLD 10
50

    
51
#define MMAP_AREA_START        0x00000000
52
#define MMAP_AREA_END          0xa8000000
53

    
54
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
55
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
56
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
57
int nb_tbs;
58
/* any access to the tbs or the page table must use this lock */
59
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
60

    
61
uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
62
uint8_t *code_gen_ptr;
63

    
64
int phys_ram_size;
65
int phys_ram_fd;
66
uint8_t *phys_ram_base;
67
uint8_t *phys_ram_dirty;
68

    
69
typedef struct PageDesc {
70
    /* list of TBs intersecting this ram page */
71
    TranslationBlock *first_tb;
72
    /* in order to optimize self modifying code, we count the number
73
       of lookups we do to a given page to use a bitmap */
74
    unsigned int code_write_count;
75
    uint8_t *code_bitmap;
76
#if defined(CONFIG_USER_ONLY)
77
    unsigned long flags;
78
#endif
79
} PageDesc;
80

    
81
typedef struct PhysPageDesc {
82
    /* offset in host memory of the page + io_index in the low 12 bits */
83
    unsigned long phys_offset;
84
} PhysPageDesc;
85

    
86
typedef struct VirtPageDesc {
87
    /* physical address of code page. It is valid only if 'valid_tag'
88
       matches 'virt_valid_tag' */ 
89
    target_ulong phys_addr; 
90
    unsigned int valid_tag;
91
#if !defined(CONFIG_SOFTMMU)
92
    /* original page access rights. It is valid only if 'valid_tag'
93
       matches 'virt_valid_tag' */
94
    unsigned int prot;
95
#endif
96
} VirtPageDesc;
97

    
98
#define L2_BITS 10
99
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
100

    
101
#define L1_SIZE (1 << L1_BITS)
102
#define L2_SIZE (1 << L2_BITS)
103

    
104
static void io_mem_init(void);
105

    
106
unsigned long qemu_real_host_page_size;
107
unsigned long qemu_host_page_bits;
108
unsigned long qemu_host_page_size;
109
unsigned long qemu_host_page_mask;
110

    
111
/* XXX: for system emulation, it could just be an array */
112
static PageDesc *l1_map[L1_SIZE];
113
static PhysPageDesc *l1_phys_map[L1_SIZE];
114

    
115
#if !defined(CONFIG_USER_ONLY)
116
static VirtPageDesc *l1_virt_map[L1_SIZE];
117
static unsigned int virt_valid_tag;
118
#endif
119

    
120
/* io memory support */
121
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
122
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
123
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
124
static int io_mem_nb;
125

    
126
/* log support */
127
char *logfilename = "/tmp/qemu.log";
128
FILE *logfile;
129
int loglevel;
130

    
131
/* statistics */
132
static int tlb_flush_count;
133
static int tb_flush_count;
134
static int tb_phys_invalidate_count;
135

    
136
static void page_init(void)
137
{
138
    /* NOTE: we can always suppose that qemu_host_page_size >=
139
       TARGET_PAGE_SIZE */
140
#ifdef _WIN32
141
    {
142
        SYSTEM_INFO system_info;
143
        DWORD old_protect;
144
        
145
        GetSystemInfo(&system_info);
146
        qemu_real_host_page_size = system_info.dwPageSize;
147
        
148
        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
149
                       PAGE_EXECUTE_READWRITE, &old_protect);
150
    }
151
#else
152
    qemu_real_host_page_size = getpagesize();
153
    {
154
        unsigned long start, end;
155

    
156
        start = (unsigned long)code_gen_buffer;
157
        start &= ~(qemu_real_host_page_size - 1);
158
        
159
        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
160
        end += qemu_real_host_page_size - 1;
161
        end &= ~(qemu_real_host_page_size - 1);
162
        
163
        mprotect((void *)start, end - start, 
164
                 PROT_READ | PROT_WRITE | PROT_EXEC);
165
    }
166
#endif
167

    
168
    if (qemu_host_page_size == 0)
169
        qemu_host_page_size = qemu_real_host_page_size;
170
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
171
        qemu_host_page_size = TARGET_PAGE_SIZE;
172
    qemu_host_page_bits = 0;
173
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
174
        qemu_host_page_bits++;
175
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
176
#if !defined(CONFIG_USER_ONLY)
177
    virt_valid_tag = 1;
178
#endif
179
}
180

    
181
static inline PageDesc *page_find_alloc(unsigned int index)
182
{
183
    PageDesc **lp, *p;
184

    
185
    lp = &l1_map[index >> L2_BITS];
186
    p = *lp;
187
    if (!p) {
188
        /* allocate if not found */
189
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
190
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
191
        *lp = p;
192
    }
193
    return p + (index & (L2_SIZE - 1));
194
}
195

    
196
static inline PageDesc *page_find(unsigned int index)
197
{
198
    PageDesc *p;
199

    
200
    p = l1_map[index >> L2_BITS];
201
    if (!p)
202
        return 0;
203
    return p + (index & (L2_SIZE - 1));
204
}
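
/* A minimal sketch of how page_find() decomposes an address: the page
   index is split into an L1 slot (top L1_BITS) and an L2 slot (low
   L2_BITS).  page_lookup_example() is illustrative only; the concrete
   widths assume TARGET_PAGE_BITS == 12, so the index below is 20 bits. */
#if 0
static PageDesc *page_lookup_example(unsigned long address)
{
    /* strip the in-page offset to get the target page index */
    unsigned int index = address >> TARGET_PAGE_BITS;
    /* the top bits select the second-level table ... */
    PageDesc *l2 = l1_map[index >> L2_BITS];
    if (!l2)
        return NULL; /* page never seen: page_find() returns 0 here too */
    /* ... the low L2_BITS bits select the PageDesc inside it */
    return &l2[index & (L2_SIZE - 1)];
}
#endif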
205

    
206
static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
207
{
208
    PhysPageDesc **lp, *p;
209

    
210
    lp = &l1_phys_map[index >> L2_BITS];
211
    p = *lp;
212
    if (!p) {
213
        /* allocate if not found */
214
        p = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
215
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
216
        *lp = p;
217
    }
218
    return p + (index & (L2_SIZE - 1));
219
}
220

    
221
static inline PhysPageDesc *phys_page_find(unsigned int index)
222
{
223
    PhysPageDesc *p;
224

    
225
    p = l1_phys_map[index >> L2_BITS];
226
    if (!p)
227
        return 0;
228
    return p + (index & (L2_SIZE - 1));
229
}
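
/* Sketch of how the phys_offset word stored in a PhysPageDesc is
   interpreted by the TLB code further down: the low bits carry the
   IO_MEM_* selector, the page-aligned part is an offset into host RAM.
   phys_offset_example() is illustrative only. */
#if 0
static void phys_offset_example(unsigned int paddr)
{
    PhysPageDesc *p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    unsigned long pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;

    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* RAM or ROM: backed by host memory at
           phys_ram_base + (pd & TARGET_PAGE_MASK) */
    } else {
        /* I/O page: accesses go through the io_mem_read/io_mem_write
           handler tables */
    }
}
#endif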
230

    
231
#if !defined(CONFIG_USER_ONLY)
232
static void tlb_protect_code(CPUState *env, target_ulong addr);
233
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);
234

    
235
static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
236
{
237
    VirtPageDesc **lp, *p;
238

    
239
    /* XXX: should not truncate for 64 bit addresses */
240
#if TARGET_LONG_BITS > 32
241
    index &= (L1_SIZE - 1);
242
#endif
243
    lp = &l1_virt_map[index >> L2_BITS];
244
    p = *lp;
245
    if (!p) {
246
        /* allocate if not found */
247
        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
248
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
249
        *lp = p;
250
    }
251
    return p + (index & (L2_SIZE - 1));
252
}
253

    
254
static inline VirtPageDesc *virt_page_find(unsigned int index)
255
{
256
    VirtPageDesc *p;
257

    
258
    p = l1_virt_map[index >> L2_BITS];
259
    if (!p)
260
        return 0;
261
    return p + (index & (L2_SIZE - 1));
262
}
263

    
264
static void virt_page_flush(void)
265
{
266
    int i, j;
267
    VirtPageDesc *p;
268
    
269
    virt_valid_tag++;
270

    
271
    if (virt_valid_tag == 0) {
272
        virt_valid_tag = 1;
273
        for(i = 0; i < L1_SIZE; i++) {
274
            p = l1_virt_map[i];
275
            if (p) {
276
                for(j = 0; j < L2_SIZE; j++)
277
                    p[j].valid_tag = 0;
278
            }
279
        }
280
    }
281
}
282
#else
283
static void virt_page_flush(void)
284
{
285
}
286
#endif
287

    
288
void cpu_exec_init(void)
289
{
290
    if (!code_gen_ptr) {
291
        code_gen_ptr = code_gen_buffer;
292
        page_init();
293
        io_mem_init();
294
    }
295
}
296

    
297
static inline void invalidate_page_bitmap(PageDesc *p)
298
{
299
    if (p->code_bitmap) {
300
        qemu_free(p->code_bitmap);
301
        p->code_bitmap = NULL;
302
    }
303
    p->code_write_count = 0;
304
}
305

    
306
/* set to NULL all the 'first_tb' fields in all PageDescs */
307
static void page_flush_tb(void)
308
{
309
    int i, j;
310
    PageDesc *p;
311

    
312
    for(i = 0; i < L1_SIZE; i++) {
313
        p = l1_map[i];
314
        if (p) {
315
            for(j = 0; j < L2_SIZE; j++) {
316
                p->first_tb = NULL;
317
                invalidate_page_bitmap(p);
318
                p++;
319
            }
320
        }
321
    }
322
}
323

    
324
/* flush all the translation blocks */
325
/* XXX: tb_flush is currently not thread safe */
326
void tb_flush(CPUState *env)
327
{
328
#if defined(DEBUG_FLUSH)
329
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n", 
330
           code_gen_ptr - code_gen_buffer, 
331
           nb_tbs, 
332
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
333
#endif
334
    nb_tbs = 0;
335
    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
336
    virt_page_flush();
337

    
338
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
339
    page_flush_tb();
340

    
341
    code_gen_ptr = code_gen_buffer;
342
    /* XXX: flush processor icache at this point if cache flush is
343
       expensive */
344
    tb_flush_count++;
345
}
346

    
347
#ifdef DEBUG_TB_CHECK
348

    
349
static void tb_invalidate_check(unsigned long address)
350
{
351
    TranslationBlock *tb;
352
    int i;
353
    address &= TARGET_PAGE_MASK;
354
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
355
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
356
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
357
                  address >= tb->pc + tb->size)) {
358
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
359
                       address, tb->pc, tb->size);
360
            }
361
        }
362
    }
363
}
364

    
365
/* verify that all the pages have correct rights for code */
366
static void tb_page_check(void)
367
{
368
    TranslationBlock *tb;
369
    int i, flags1, flags2;
370
    
371
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
372
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
373
            flags1 = page_get_flags(tb->pc);
374
            flags2 = page_get_flags(tb->pc + tb->size - 1);
375
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
376
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
377
                       tb->pc, tb->size, flags1, flags2);
378
            }
379
        }
380
    }
381
}
382

    
383
void tb_jmp_check(TranslationBlock *tb)
384
{
385
    TranslationBlock *tb1;
386
    unsigned int n1;
387

    
388
    /* suppress any remaining jumps to this TB */
389
    tb1 = tb->jmp_first;
390
    for(;;) {
391
        n1 = (long)tb1 & 3;
392
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
393
        if (n1 == 2)
394
            break;
395
        tb1 = tb1->jmp_next[n1];
396
    }
397
    /* check end of list */
398
    if (tb1 != tb) {
399
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
400
    }
401
}
402

    
403
#endif
404

    
405
/* invalidate one TB */
406
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
407
                             int next_offset)
408
{
409
    TranslationBlock *tb1;
410
    for(;;) {
411
        tb1 = *ptb;
412
        if (tb1 == tb) {
413
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
414
            break;
415
        }
416
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
417
    }
418
}
419

    
420
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
421
{
422
    TranslationBlock *tb1;
423
    unsigned int n1;
424

    
425
    for(;;) {
426
        tb1 = *ptb;
427
        n1 = (long)tb1 & 3;
428
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
429
        if (tb1 == tb) {
430
            *ptb = tb1->page_next[n1];
431
            break;
432
        }
433
        ptb = &tb1->page_next[n1];
434
    }
435
}
436

    
437
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
438
{
439
    TranslationBlock *tb1, **ptb;
440
    unsigned int n1;
441

    
442
    ptb = &tb->jmp_next[n];
443
    tb1 = *ptb;
444
    if (tb1) {
445
        /* find tb(n) in circular list */
446
        for(;;) {
447
            tb1 = *ptb;
448
            n1 = (long)tb1 & 3;
449
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
450
            if (n1 == n && tb1 == tb)
451
                break;
452
            if (n1 == 2) {
453
                ptb = &tb1->jmp_first;
454
            } else {
455
                ptb = &tb1->jmp_next[n1];
456
            }
457
        }
458
        /* now we can suppress tb(n) from the list */
459
        *ptb = tb->jmp_next[n];
460

    
461
        tb->jmp_next[n] = NULL;
462
    }
463
}
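
/* The jmp_first/jmp_next lists walked above store a 2-bit tag in the low
   bits of each TranslationBlock pointer (TBs are suitably aligned): 0 or 1
   names the jump slot of the TB that does the jumping, 2 marks the list
   head.  tb_jmp_walk_example() is only an illustrative restatement of the
   loop used by tb_jmp_remove() and tb_invalidate(). */
#if 0
static void tb_jmp_walk_example(TranslationBlock *tb)
{
    TranslationBlock *tb1 = tb->jmp_first;
    unsigned int n1;

    for(;;) {
        n1 = (long)tb1 & 3;                         /* tag in the low 2 bits */
        if (n1 == 2)                                /* back at the head: done */
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3); /* untagged pointer */
        /* 'tb1' jumps to 'tb' through its jump slot 'n1' (0 or 1) */
        tb1 = tb1->jmp_next[n1];
    }
}
#endif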
464

    
465
/* reset the jump entry 'n' of a TB so that it is not chained to
466
   another TB */
467
static inline void tb_reset_jump(TranslationBlock *tb, int n)
468
{
469
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
470
}
471

    
472
static inline void tb_invalidate(TranslationBlock *tb)
473
{
474
    unsigned int h, n1;
475
    TranslationBlock *tb1, *tb2, **ptb;
476
    
477
    tb_invalidated_flag = 1;
478

    
479
    /* remove the TB from the hash list */
480
    h = tb_hash_func(tb->pc);
481
    ptb = &tb_hash[h];
482
    for(;;) {
483
        tb1 = *ptb;
484
        /* NOTE: the TB is not necessarily linked in the hash. It
485
           indicates that it is not currently used */
486
        if (tb1 == NULL)
487
            return;
488
        if (tb1 == tb) {
489
            *ptb = tb1->hash_next;
490
            break;
491
        }
492
        ptb = &tb1->hash_next;
493
    }
494

    
495
    /* suppress this TB from the two jump lists */
496
    tb_jmp_remove(tb, 0);
497
    tb_jmp_remove(tb, 1);
498

    
499
    /* suppress any remaining jumps to this TB */
500
    tb1 = tb->jmp_first;
501
    for(;;) {
502
        n1 = (long)tb1 & 3;
503
        if (n1 == 2)
504
            break;
505
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
506
        tb2 = tb1->jmp_next[n1];
507
        tb_reset_jump(tb1, n1);
508
        tb1->jmp_next[n1] = NULL;
509
        tb1 = tb2;
510
    }
511
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
512
}
513

    
514
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
515
{
516
    PageDesc *p;
517
    unsigned int h;
518
    target_ulong phys_pc;
519
    
520
    /* remove the TB from the hash list */
521
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
522
    h = tb_phys_hash_func(phys_pc);
523
    tb_remove(&tb_phys_hash[h], tb, 
524
              offsetof(TranslationBlock, phys_hash_next));
525

    
526
    /* remove the TB from the page list */
527
    if (tb->page_addr[0] != page_addr) {
528
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
529
        tb_page_remove(&p->first_tb, tb);
530
        invalidate_page_bitmap(p);
531
    }
532
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
533
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
534
        tb_page_remove(&p->first_tb, tb);
535
        invalidate_page_bitmap(p);
536
    }
537

    
538
    tb_invalidate(tb);
539
    tb_phys_invalidate_count++;
540
}
541

    
542
static inline void set_bits(uint8_t *tab, int start, int len)
543
{
544
    int end, mask, end1;
545

    
546
    end = start + len;
547
    tab += start >> 3;
548
    mask = 0xff << (start & 7);
549
    if ((start & ~7) == (end & ~7)) {
550
        if (start < end) {
551
            mask &= ~(0xff << (end & 7));
552
            *tab |= mask;
553
        }
554
    } else {
555
        *tab++ |= mask;
556
        start = (start + 8) & ~7;
557
        end1 = end & ~7;
558
        while (start < end1) {
559
            *tab++ = 0xff;
560
            start += 8;
561
        }
562
        if (start < end) {
563
            mask = ~(0xff << (end & 7));
564
            *tab |= mask;
565
        }
566
    }
567
}
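
/* What set_bits() computes, on a concrete (made up) example: marking the
   byte range [5, 21) of a page in a bitmap that has one bit per byte. */
#if 0
static void set_bits_example(void)
{
    uint8_t bitmap[TARGET_PAGE_SIZE / 8];

    memset(bitmap, 0, sizeof(bitmap));
    set_bits(bitmap, 5, 16);
    /* now: bitmap[0] has bits 5..7 set, bitmap[1] is 0xff,
       bitmap[2] has bits 0..4 set */
}
#endif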
568

    
569
static void build_page_bitmap(PageDesc *p)
570
{
571
    int n, tb_start, tb_end;
572
    TranslationBlock *tb;
573
    
574
    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
575
    if (!p->code_bitmap)
576
        return;
577
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
578

    
579
    tb = p->first_tb;
580
    while (tb != NULL) {
581
        n = (long)tb & 3;
582
        tb = (TranslationBlock *)((long)tb & ~3);
583
        /* NOTE: this is subtle as a TB may span two physical pages */
584
        if (n == 0) {
585
            /* NOTE: tb_end may be after the end of the page, but
586
               it is not a problem */
587
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
588
            tb_end = tb_start + tb->size;
589
            if (tb_end > TARGET_PAGE_SIZE)
590
                tb_end = TARGET_PAGE_SIZE;
591
        } else {
592
            tb_start = 0;
593
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
594
        }
595
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
596
        tb = tb->page_next[n];
597
    }
598
}
599

    
600
#ifdef TARGET_HAS_PRECISE_SMC
601

    
602
static void tb_gen_code(CPUState *env, 
603
                        target_ulong pc, target_ulong cs_base, int flags,
604
                        int cflags)
605
{
606
    TranslationBlock *tb;
607
    uint8_t *tc_ptr;
608
    target_ulong phys_pc, phys_page2, virt_page2;
609
    int code_gen_size;
610

    
611
    phys_pc = get_phys_addr_code(env, pc);
612
    tb = tb_alloc(pc);
613
    if (!tb) {
614
        /* flush must be done */
615
        tb_flush(env);
616
        /* cannot fail at this point */
617
        tb = tb_alloc(pc);
618
    }
619
    tc_ptr = code_gen_ptr;
620
    tb->tc_ptr = tc_ptr;
621
    tb->cs_base = cs_base;
622
    tb->flags = flags;
623
    tb->cflags = cflags;
624
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
625
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
626
    
627
    /* check next page if needed */
628
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
629
    phys_page2 = -1;
630
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
631
        phys_page2 = get_phys_addr_code(env, virt_page2);
632
    }
633
    tb_link_phys(tb, phys_pc, phys_page2);
634
}
635
#endif
636
    
637
/* invalidate all TBs which intersect with the target physical page
638
   starting in range [start;end[. NOTE: start and end must refer to
639
   the same physical page. 'is_cpu_write_access' should be true if called
640
   from a real cpu write access: the virtual CPU will exit the current
641
   TB if code is modified inside this TB. */
642
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end, 
643
                                   int is_cpu_write_access)
644
{
645
    int n, current_tb_modified, current_tb_not_found, current_flags;
646
    CPUState *env = cpu_single_env;
647
    PageDesc *p;
648
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
649
    target_ulong tb_start, tb_end;
650
    target_ulong current_pc, current_cs_base;
651

    
652
    p = page_find(start >> TARGET_PAGE_BITS);
653
    if (!p) 
654
        return;
655
    if (!p->code_bitmap && 
656
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
657
        is_cpu_write_access) {
658
        /* build code bitmap */
659
        build_page_bitmap(p);
660
    }
661

    
662
    /* we remove all the TBs in the range [start, end[ */
663
    /* XXX: see if in some cases it could be faster to invalidate all the code */
664
    current_tb_not_found = is_cpu_write_access;
665
    current_tb_modified = 0;
666
    current_tb = NULL; /* avoid warning */
667
    current_pc = 0; /* avoid warning */
668
    current_cs_base = 0; /* avoid warning */
669
    current_flags = 0; /* avoid warning */
670
    tb = p->first_tb;
671
    while (tb != NULL) {
672
        n = (long)tb & 3;
673
        tb = (TranslationBlock *)((long)tb & ~3);
674
        tb_next = tb->page_next[n];
675
        /* NOTE: this is subtle as a TB may span two physical pages */
676
        if (n == 0) {
677
            /* NOTE: tb_end may be after the end of the page, but
678
               it is not a problem */
679
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
680
            tb_end = tb_start + tb->size;
681
        } else {
682
            tb_start = tb->page_addr[1];
683
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
684
        }
685
        if (!(tb_end <= start || tb_start >= end)) {
686
#ifdef TARGET_HAS_PRECISE_SMC
687
            if (current_tb_not_found) {
688
                current_tb_not_found = 0;
689
                current_tb = NULL;
690
                if (env->mem_write_pc) {
691
                    /* now we have a real cpu fault */
692
                    current_tb = tb_find_pc(env->mem_write_pc);
693
                }
694
            }
695
            if (current_tb == tb &&
696
                !(current_tb->cflags & CF_SINGLE_INSN)) {
697
                /* If we are modifying the current TB, we must stop
698
                its execution. We could be more precise by checking
699
                that the modification is after the current PC, but it
700
                would require a specialized function to partially
701
                restore the CPU state */
702
                
703
                current_tb_modified = 1;
704
                cpu_restore_state(current_tb, env, 
705
                                  env->mem_write_pc, NULL);
706
#if defined(TARGET_I386)
707
                current_flags = env->hflags;
708
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
709
                current_cs_base = (target_ulong)env->segs[R_CS].base;
710
                current_pc = current_cs_base + env->eip;
711
#else
712
#error unsupported CPU
713
#endif
714
            }
715
#endif /* TARGET_HAS_PRECISE_SMC */
716
            saved_tb = env->current_tb;
717
            env->current_tb = NULL;
718
            tb_phys_invalidate(tb, -1);
719
            env->current_tb = saved_tb;
720
            if (env->interrupt_request && env->current_tb)
721
                cpu_interrupt(env, env->interrupt_request);
722
        }
723
        tb = tb_next;
724
    }
725
#if !defined(CONFIG_USER_ONLY)
726
    /* if no code remaining, no need to continue to use slow writes */
727
    if (!p->first_tb) {
728
        invalidate_page_bitmap(p);
729
        if (is_cpu_write_access) {
730
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
731
        }
732
    }
733
#endif
734
#ifdef TARGET_HAS_PRECISE_SMC
735
    if (current_tb_modified) {
736
        /* we generate a block containing just the instruction
737
           modifying the memory. It will ensure that it cannot modify
738
           itself */
739
        env->current_tb = NULL;
740
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 
741
                    CF_SINGLE_INSN);
742
        cpu_resume_from_signal(env, NULL);
743
    }
744
#endif
745
}
746

    
747
/* len must be <= 8 and start must be a multiple of len */
748
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
749
{
750
    PageDesc *p;
751
    int offset, b;
752
#if 0
753
    if (1) {
754
        if (loglevel) {
755
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n", 
756
                   cpu_single_env->mem_write_vaddr, len, 
757
                   cpu_single_env->eip, 
758
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
759
        }
760
    }
761
#endif
762
    p = page_find(start >> TARGET_PAGE_BITS);
763
    if (!p) 
764
        return;
765
    if (p->code_bitmap) {
766
        offset = start & ~TARGET_PAGE_MASK;
767
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
768
        if (b & ((1 << len) - 1))
769
            goto do_invalidate;
770
    } else {
771
    do_invalidate:
772
        tb_invalidate_phys_page_range(start, start + len, 1);
773
    }
774
}
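
/* Sketch of how a slow-path memory write handler is expected to use the
   helper above: invalidate any TB overlapping the bytes about to be
   written, then let the write proceed.  code_mem_write_example() and its
   arguments are illustrative only. */
#if 0
static void code_mem_write_example(target_ulong phys_addr, uint32_t val, int size)
{
    tb_invalidate_phys_page_fast(phys_addr, size);
    /* ... perform the actual store into phys_ram_base + phys_addr ... */
}
#endif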
775

    
776
#if !defined(CONFIG_SOFTMMU)
777
static void tb_invalidate_phys_page(target_ulong addr, 
778
                                    unsigned long pc, void *puc)
779
{
780
    int n, current_flags, current_tb_modified;
781
    target_ulong current_pc, current_cs_base;
782
    PageDesc *p;
783
    TranslationBlock *tb, *current_tb;
784
#ifdef TARGET_HAS_PRECISE_SMC
785
    CPUState *env = cpu_single_env;
786
#endif
787

    
788
    addr &= TARGET_PAGE_MASK;
789
    p = page_find(addr >> TARGET_PAGE_BITS);
790
    if (!p) 
791
        return;
792
    tb = p->first_tb;
793
    current_tb_modified = 0;
794
    current_tb = NULL;
795
    current_pc = 0; /* avoid warning */
796
    current_cs_base = 0; /* avoid warning */
797
    current_flags = 0; /* avoid warning */
798
#ifdef TARGET_HAS_PRECISE_SMC
799
    if (tb && pc != 0) {
800
        current_tb = tb_find_pc(pc);
801
    }
802
#endif
803
    while (tb != NULL) {
804
        n = (long)tb & 3;
805
        tb = (TranslationBlock *)((long)tb & ~3);
806
#ifdef TARGET_HAS_PRECISE_SMC
807
        if (current_tb == tb &&
808
            !(current_tb->cflags & CF_SINGLE_INSN)) {
809
                /* If we are modifying the current TB, we must stop
810
                   its execution. We could be more precise by checking
811
                   that the modification is after the current PC, but it
812
                   would require a specialized function to partially
813
                   restore the CPU state */
814
            
815
            current_tb_modified = 1;
816
            cpu_restore_state(current_tb, env, pc, puc);
817
#if defined(TARGET_I386)
818
            current_flags = env->hflags;
819
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
820
            current_cs_base = (target_ulong)env->segs[R_CS].base;
821
            current_pc = current_cs_base + env->eip;
822
#else
823
#error unsupported CPU
824
#endif
825
        }
826
#endif /* TARGET_HAS_PRECISE_SMC */
827
        tb_phys_invalidate(tb, addr);
828
        tb = tb->page_next[n];
829
    }
830
    p->first_tb = NULL;
831
#ifdef TARGET_HAS_PRECISE_SMC
832
    if (current_tb_modified) {
833
        /* we generate a block containing just the instruction
834
           modifying the memory. It will ensure that it cannot modify
835
           itself */
836
        env->current_tb = NULL;
837
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 
838
                    CF_SINGLE_INSN);
839
        cpu_resume_from_signal(env, puc);
840
    }
841
#endif
842
}
843
#endif
844

    
845
/* add the tb in the target page and protect it if necessary */
846
static inline void tb_alloc_page(TranslationBlock *tb, 
847
                                 unsigned int n, unsigned int page_addr)
848
{
849
    PageDesc *p;
850
    TranslationBlock *last_first_tb;
851

    
852
    tb->page_addr[n] = page_addr;
853
    p = page_find(page_addr >> TARGET_PAGE_BITS);
854
    tb->page_next[n] = p->first_tb;
855
    last_first_tb = p->first_tb;
856
    p->first_tb = (TranslationBlock *)((long)tb | n);
857
    invalidate_page_bitmap(p);
858

    
859
#if defined(TARGET_HAS_SMC) || 1
860

    
861
#if defined(CONFIG_USER_ONLY)
862
    if (p->flags & PAGE_WRITE) {
863
        unsigned long host_start, host_end, addr;
864
        int prot;
865

    
866
        /* force the host page as non writable (writes will have a
867
           page fault + mprotect overhead) */
868
        host_start = page_addr & qemu_host_page_mask;
869
        host_end = host_start + qemu_host_page_size;
870
        prot = 0;
871
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
872
            prot |= page_get_flags(addr);
873
        mprotect((void *)host_start, qemu_host_page_size, 
874
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
875
#ifdef DEBUG_TB_INVALIDATE
876
        printf("protecting code page: 0x%08lx\n", 
877
               host_start);
878
#endif
879
        p->flags &= ~PAGE_WRITE;
880
    }
881
#else
882
    /* if some code is already present, then the pages are already
883
       protected. So we handle the case where only the first TB is
884
       allocated in a physical page */
885
    if (!last_first_tb) {
886
        target_ulong virt_addr;
887

    
888
        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
889
        tlb_protect_code(cpu_single_env, virt_addr);        
890
    }
891
#endif
892

    
893
#endif /* TARGET_HAS_SMC */
894
}
895

    
896
/* Allocate a new translation block. Flush the translation buffer if
897
   too many translation blocks or too much generated code. */
898
TranslationBlock *tb_alloc(target_ulong pc)
899
{
900
    TranslationBlock *tb;
901

    
902
    if (nb_tbs >= CODE_GEN_MAX_BLOCKS || 
903
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
904
        return NULL;
905
    tb = &tbs[nb_tbs++];
906
    tb->pc = pc;
907
    tb->cflags = 0;
908
    return tb;
909
}
910

    
911
/* add a new TB and link it to the physical page tables. phys_page2 is
912
   (-1) to indicate that only one page contains the TB. */
913
void tb_link_phys(TranslationBlock *tb, 
914
                  target_ulong phys_pc, target_ulong phys_page2)
915
{
916
    unsigned int h;
917
    TranslationBlock **ptb;
918

    
919
    /* add in the physical hash table */
920
    h = tb_phys_hash_func(phys_pc);
921
    ptb = &tb_phys_hash[h];
922
    tb->phys_hash_next = *ptb;
923
    *ptb = tb;
924

    
925
    /* add in the page list */
926
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
927
    if (phys_page2 != -1)
928
        tb_alloc_page(tb, 1, phys_page2);
929
    else
930
        tb->page_addr[1] = -1;
931
#ifdef DEBUG_TB_CHECK
932
    tb_page_check();
933
#endif
934
}
935

    
936
/* link the tb with the other TBs */
937
void tb_link(TranslationBlock *tb)
938
{
939
#if !defined(CONFIG_USER_ONLY)
940
    {
941
        VirtPageDesc *vp;
942
        target_ulong addr;
943
        
944
        /* save the code memory mappings (needed to invalidate the code) */
945
        addr = tb->pc & TARGET_PAGE_MASK;
946
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
947
#ifdef DEBUG_TLB_CHECK 
948
        if (vp->valid_tag == virt_valid_tag &&
949
            vp->phys_addr != tb->page_addr[0]) {
950
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
951
                   addr, tb->page_addr[0], vp->phys_addr);
952
        }
953
#endif
954
        vp->phys_addr = tb->page_addr[0];
955
        if (vp->valid_tag != virt_valid_tag) {
956
            vp->valid_tag = virt_valid_tag;
957
#if !defined(CONFIG_SOFTMMU)
958
            vp->prot = 0;
959
#endif
960
        }
961
        
962
        if (tb->page_addr[1] != -1) {
963
            addr += TARGET_PAGE_SIZE;
964
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
965
#ifdef DEBUG_TLB_CHECK 
966
            if (vp->valid_tag == virt_valid_tag &&
967
                vp->phys_addr != tb->page_addr[1]) { 
968
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
969
                       addr, tb->page_addr[1], vp->phys_addr);
970
            }
971
#endif
972
            vp->phys_addr = tb->page_addr[1];
973
            if (vp->valid_tag != virt_valid_tag) {
974
                vp->valid_tag = virt_valid_tag;
975
#if !defined(CONFIG_SOFTMMU)
976
                vp->prot = 0;
977
#endif
978
            }
979
        }
980
    }
981
#endif
982

    
983
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
984
    tb->jmp_next[0] = NULL;
985
    tb->jmp_next[1] = NULL;
986
#ifdef USE_CODE_COPY
987
    tb->cflags &= ~CF_FP_USED;
988
    if (tb->cflags & CF_TB_FP_USED)
989
        tb->cflags |= CF_FP_USED;
990
#endif
991

    
992
    /* init original jump addresses */
993
    if (tb->tb_next_offset[0] != 0xffff)
994
        tb_reset_jump(tb, 0);
995
    if (tb->tb_next_offset[1] != 0xffff)
996
        tb_reset_jump(tb, 1);
997
}
998

    
999
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1000
   tb[1].tc_ptr. Return NULL if not found */
1001
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1002
{
1003
    int m_min, m_max, m;
1004
    unsigned long v;
1005
    TranslationBlock *tb;
1006

    
1007
    if (nb_tbs <= 0)
1008
        return NULL;
1009
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1010
        tc_ptr >= (unsigned long)code_gen_ptr)
1011
        return NULL;
1012
    /* binary search (cf Knuth) */
1013
    m_min = 0;
1014
    m_max = nb_tbs - 1;
1015
    while (m_min <= m_max) {
1016
        m = (m_min + m_max) >> 1;
1017
        tb = &tbs[m];
1018
        v = (unsigned long)tb->tc_ptr;
1019
        if (v == tc_ptr)
1020
            return tb;
1021
        else if (tc_ptr < v) {
1022
            m_max = m - 1;
1023
        } else {
1024
            m_min = m + 1;
1025
        }
1026
    } 
1027
    return &tbs[m_max];
1028
}
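
/* Typical use of tb_find_pc(), as done by the SMC handling above: given a
   host PC taken from a signal context, map it back to the TB containing
   the generated code and restore the guest CPU state.  The function below
   is only a sketch. */
#if 0
static void tb_find_pc_usage_example(CPUState *env, unsigned long host_pc, void *puc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);
    if (tb) {
        /* the fault happened inside generated code */
        cpu_restore_state(tb, env, host_pc, puc);
    }
}
#endif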
1029

    
1030
static void tb_reset_jump_recursive(TranslationBlock *tb);
1031

    
1032
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1033
{
1034
    TranslationBlock *tb1, *tb_next, **ptb;
1035
    unsigned int n1;
1036

    
1037
    tb1 = tb->jmp_next[n];
1038
    if (tb1 != NULL) {
1039
        /* find head of list */
1040
        for(;;) {
1041
            n1 = (long)tb1 & 3;
1042
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1043
            if (n1 == 2)
1044
                break;
1045
            tb1 = tb1->jmp_next[n1];
1046
        }
1047
        /* we are now sure that tb jumps to tb1 */
1048
        tb_next = tb1;
1049

    
1050
        /* remove tb from the jmp_first list */
1051
        ptb = &tb_next->jmp_first;
1052
        for(;;) {
1053
            tb1 = *ptb;
1054
            n1 = (long)tb1 & 3;
1055
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1056
            if (n1 == n && tb1 == tb)
1057
                break;
1058
            ptb = &tb1->jmp_next[n1];
1059
        }
1060
        *ptb = tb->jmp_next[n];
1061
        tb->jmp_next[n] = NULL;
1062
        
1063
        /* suppress the jump to next tb in generated code */
1064
        tb_reset_jump(tb, n);
1065

    
1066
        /* suppress jumps in the tb on which we could have jumped */
1067
        tb_reset_jump_recursive(tb_next);
1068
    }
1069
}
1070

    
1071
static void tb_reset_jump_recursive(TranslationBlock *tb)
1072
{
1073
    tb_reset_jump_recursive2(tb, 0);
1074
    tb_reset_jump_recursive2(tb, 1);
1075
}
1076

    
1077
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
1078
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1079
{
1080
    target_ulong phys_addr;
1081

    
1082
    phys_addr = cpu_get_phys_page_debug(env, pc);
1083
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
1084
}
1085
#endif
1086

    
1087
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1088
   breakpoint is reached */
1089
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1090
{
1091
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
1092
    int i;
1093
    
1094
    for(i = 0; i < env->nb_breakpoints; i++) {
1095
        if (env->breakpoints[i] == pc)
1096
            return 0;
1097
    }
1098

    
1099
    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1100
        return -1;
1101
    env->breakpoints[env->nb_breakpoints++] = pc;
1102
    
1103
    breakpoint_invalidate(env, pc);
1104
    return 0;
1105
#else
1106
    return -1;
1107
#endif
1108
}
1109

    
1110
/* remove a breakpoint */
1111
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1112
{
1113
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
1114
    int i;
1115
    for(i = 0; i < env->nb_breakpoints; i++) {
1116
        if (env->breakpoints[i] == pc)
1117
            goto found;
1118
    }
1119
    return -1;
1120
 found:
1121
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
1122
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
1123
    env->nb_breakpoints--;
1124

    
1125
    breakpoint_invalidate(env, pc);
1126
    return 0;
1127
#else
1128
    return -1;
1129
#endif
1130
}
1131

    
1132
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1133
   CPU loop after each instruction */
1134
void cpu_single_step(CPUState *env, int enabled)
1135
{
1136
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
1137
    if (env->singlestep_enabled != enabled) {
1138
        env->singlestep_enabled = enabled;
1139
        /* must flush all the translated code to avoid inconsistencies */
1140
        /* XXX: only flush what is necessary */
1141
        tb_flush(env);
1142
    }
1143
#endif
1144
}
1145

    
1146
/* enable or disable low level logging */
1147
void cpu_set_log(int log_flags)
1148
{
1149
    loglevel = log_flags;
1150
    if (loglevel && !logfile) {
1151
        logfile = fopen(logfilename, "w");
1152
        if (!logfile) {
1153
            perror(logfilename);
1154
            _exit(1);
1155
        }
1156
#if !defined(CONFIG_SOFTMMU)
1157
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1158
        {
1159
            static uint8_t logfile_buf[4096];
1160
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1161
        }
1162
#else
1163
        setvbuf(logfile, NULL, _IOLBF, 0);
1164
#endif
1165
    }
1166
}
1167

    
1168
void cpu_set_log_filename(const char *filename)
1169
{
1170
    logfilename = strdup(filename);
1171
}
1172

    
1173
/* mask must never be zero, except for A20 change call */
1174
void cpu_interrupt(CPUState *env, int mask)
1175
{
1176
    TranslationBlock *tb;
1177
    static int interrupt_lock;
1178

    
1179
    env->interrupt_request |= mask;
1180
    /* if the cpu is currently executing code, we must unlink it and
1181
       all the potentially executing TB */
1182
    tb = env->current_tb;
1183
    if (tb && !testandset(&interrupt_lock)) {
1184
        env->current_tb = NULL;
1185
        tb_reset_jump_recursive(tb);
1186
        interrupt_lock = 0;
1187
    }
1188
}
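
/* Sketch of the caller side: a device model raising a hardware interrupt.
   Unlinking the jump chains of the current TB above is what forces the
   generated code back to the main loop, where interrupt_request is
   examined.  CPU_INTERRUPT_HARD is the usual mask for external IRQs. */
#if 0
static void raise_irq_example(CPUState *env)
{
    cpu_interrupt(env, CPU_INTERRUPT_HARD);
}
#endif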
1189

    
1190
void cpu_reset_interrupt(CPUState *env, int mask)
1191
{
1192
    env->interrupt_request &= ~mask;
1193
}
1194

    
1195
CPULogItem cpu_log_items[] = {
1196
    { CPU_LOG_TB_OUT_ASM, "out_asm", 
1197
      "show generated host assembly code for each compiled TB" },
1198
    { CPU_LOG_TB_IN_ASM, "in_asm",
1199
      "show target assembly code for each compiled TB" },
1200
    { CPU_LOG_TB_OP, "op", 
1201
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1202
#ifdef TARGET_I386
1203
    { CPU_LOG_TB_OP_OPT, "op_opt",
1204
      "show micro ops after optimization for each compiled TB" },
1205
#endif
1206
    { CPU_LOG_INT, "int",
1207
      "show interrupts/exceptions in short format" },
1208
    { CPU_LOG_EXEC, "exec",
1209
      "show trace before each executed TB (lots of logs)" },
1210
    { CPU_LOG_TB_CPU, "cpu",
1211
      "show CPU state before bloc translation" },
1212
#ifdef TARGET_I386
1213
    { CPU_LOG_PCALL, "pcall",
1214
      "show protected mode far calls/returns/exceptions" },
1215
#endif
1216
#ifdef DEBUG_IOPORT
1217
    { CPU_LOG_IOPORT, "ioport",
1218
      "show all i/o ports accesses" },
1219
#endif
1220
    { 0, NULL, NULL },
1221
};
1222

    
1223
static int cmp1(const char *s1, int n, const char *s2)
1224
{
1225
    if (strlen(s2) != n)
1226
        return 0;
1227
    return memcmp(s1, s2, n) == 0;
1228
}
1229
      
1230
/* takes a comma separated list of log masks. Returns 0 on error. */
1231
int cpu_str_to_log_mask(const char *str)
1232
{
1233
    CPULogItem *item;
1234
    int mask;
1235
    const char *p, *p1;
1236

    
1237
    p = str;
1238
    mask = 0;
1239
    for(;;) {
1240
        p1 = strchr(p, ',');
1241
        if (!p1)
1242
            p1 = p + strlen(p);
1243
        if(cmp1(p,p1-p,"all")) {
1244
                for(item = cpu_log_items; item->mask != 0; item++) {
1245
                        mask |= item->mask;
1246
                }
1247
        } else {
1248
        for(item = cpu_log_items; item->mask != 0; item++) {
1249
            if (cmp1(p, p1 - p, item->name))
1250
                goto found;
1251
        }
1252
        return 0;
1253
        }
1254
    found:
1255
        mask |= item->mask;
1256
        if (*p1 != ',')
1257
            break;
1258
        p = p1 + 1;
1259
    }
1260
    return mask;
1261
}
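
/* How the logging helpers above combine in practice; the mask string and
   file name are only examples. */
#if 0
static void log_setup_example(void)
{
    int mask = cpu_str_to_log_mask("in_asm,op");
    if (mask == 0)
        return; /* unknown item in the comma separated list */
    cpu_set_log_filename("/tmp/qemu.log");
    cpu_set_log(mask);
}
#endif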
1262

    
1263
void cpu_abort(CPUState *env, const char *fmt, ...)
1264
{
1265
    va_list ap;
1266

    
1267
    va_start(ap, fmt);
1268
    fprintf(stderr, "qemu: fatal: ");
1269
    vfprintf(stderr, fmt, ap);
1270
    fprintf(stderr, "\n");
1271
#ifdef TARGET_I386
1272
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1273
#else
1274
    cpu_dump_state(env, stderr, fprintf, 0);
1275
#endif
1276
    va_end(ap);
1277
    abort();
1278
}
1279

    
1280
#if !defined(CONFIG_USER_ONLY)
1281

    
1282
/* NOTE: if flush_global is true, also flush global entries (not
1283
   implemented yet) */
1284
void tlb_flush(CPUState *env, int flush_global)
1285
{
1286
    int i;
1287

    
1288
#if defined(DEBUG_TLB)
1289
    printf("tlb_flush:\n");
1290
#endif
1291
    /* must reset current TB so that interrupts cannot modify the
1292
       links while we are modifying them */
1293
    env->current_tb = NULL;
1294

    
1295
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1296
        env->tlb_read[0][i].address = -1;
1297
        env->tlb_write[0][i].address = -1;
1298
        env->tlb_read[1][i].address = -1;
1299
        env->tlb_write[1][i].address = -1;
1300
    }
1301

    
1302
    virt_page_flush();
1303
    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
1304

    
1305
#if !defined(CONFIG_SOFTMMU)
1306
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1307
#endif
1308
    tlb_flush_count++;
1309
}
1310

    
1311
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1312
{
1313
    if (addr == (tlb_entry->address & 
1314
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
1315
        tlb_entry->address = -1;
1316
}
1317

    
1318
void tlb_flush_page(CPUState *env, target_ulong addr)
1319
{
1320
    int i, n;
1321
    VirtPageDesc *vp;
1322
    PageDesc *p;
1323
    TranslationBlock *tb;
1324

    
1325
#if defined(DEBUG_TLB)
1326
    printf("tlb_flush_page: 0x%08x\n", addr);
1327
#endif
1328
    /* must reset current TB so that interrupts cannot modify the
1329
       links while we are modifying them */
1330
    env->current_tb = NULL;
1331

    
1332
    addr &= TARGET_PAGE_MASK;
1333
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1334
    tlb_flush_entry(&env->tlb_read[0][i], addr);
1335
    tlb_flush_entry(&env->tlb_write[0][i], addr);
1336
    tlb_flush_entry(&env->tlb_read[1][i], addr);
1337
    tlb_flush_entry(&env->tlb_write[1][i], addr);
1338

    
1339
    /* remove from the virtual pc hash table all the TB at this
1340
       virtual address */
1341
    
1342
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1343
    if (vp && vp->valid_tag == virt_valid_tag) {
1344
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
1345
        if (p) {
1346
            /* we remove all the links to the TBs in this virtual page */
1347
            tb = p->first_tb;
1348
            while (tb != NULL) {
1349
                n = (long)tb & 3;
1350
                tb = (TranslationBlock *)((long)tb & ~3);
1351
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
1352
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
1353
                    tb_invalidate(tb);
1354
                }
1355
                tb = tb->page_next[n];
1356
            }
1357
        }
1358
        vp->valid_tag = 0;
1359
    }
1360

    
1361
#if !defined(CONFIG_SOFTMMU)
1362
    if (addr < MMAP_AREA_END)
1363
        munmap((void *)addr, TARGET_PAGE_SIZE);
1364
#endif
1365
}
1366

    
1367
static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
1368
{
1369
    if (addr == (tlb_entry->address & 
1370
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
1371
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
1372
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
1373
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
1374
    }
1375
}
1376

    
1377
/* update the TLBs so that writes to code in the virtual page 'addr'
1378
   can be detected */
1379
static void tlb_protect_code(CPUState *env, target_ulong addr)
1380
{
1381
    int i;
1382

    
1383
    addr &= TARGET_PAGE_MASK;
1384
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1385
    tlb_protect_code1(&env->tlb_write[0][i], addr);
1386
    tlb_protect_code1(&env->tlb_write[1][i], addr);
1387
#if !defined(CONFIG_SOFTMMU)
1388
    /* NOTE: as we generated the code for this page, it is already at
1389
       least readable */
1390
    if (addr < MMAP_AREA_END)
1391
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
1392
#endif
1393
}
1394

    
1395
static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry, 
1396
                                       unsigned long phys_addr)
1397
{
1398
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
1399
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
1400
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1401
    }
1402
}
1403

    
1404
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1405
   tested for self modifying code */
1406
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
1407
{
1408
    int i;
1409

    
1410
    phys_addr &= TARGET_PAGE_MASK;
1411
    phys_addr += (long)phys_ram_base;
1412
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1413
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
1414
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
1415
}
1416

    
1417
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, 
1418
                                         unsigned long start, unsigned long length)
1419
{
1420
    unsigned long addr;
1421
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1422
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1423
        if ((addr - start) < length) {
1424
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1425
        }
1426
    }
1427
}
1428

    
1429
void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
1430
{
1431
    CPUState *env;
1432
    unsigned long length, start1;
1433
    int i;
1434

    
1435
    start &= TARGET_PAGE_MASK;
1436
    end = TARGET_PAGE_ALIGN(end);
1437

    
1438
    length = end - start;
1439
    if (length == 0)
1440
        return;
1441
    memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);
1442

    
1443
    env = cpu_single_env;
1444
    /* we modify the TLB cache so that the dirty bit will be set again
1445
       when accessing the range */
1446
    start1 = start + (unsigned long)phys_ram_base;
1447
    for(i = 0; i < CPU_TLB_SIZE; i++)
1448
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
1449
    for(i = 0; i < CPU_TLB_SIZE; i++)
1450
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
1451

    
1452
#if !defined(CONFIG_SOFTMMU)
1453
    /* XXX: this is expensive */
1454
    {
1455
        VirtPageDesc *p;
1456
        int j;
1457
        target_ulong addr;
1458

    
1459
        for(i = 0; i < L1_SIZE; i++) {
1460
            p = l1_virt_map[i];
1461
            if (p) {
1462
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
1463
                for(j = 0; j < L2_SIZE; j++) {
1464
                    if (p->valid_tag == virt_valid_tag &&
1465
                        p->phys_addr >= start && p->phys_addr < end &&
1466
                        (p->prot & PROT_WRITE)) {
1467
                        if (addr < MMAP_AREA_END) {
1468
                            mprotect((void *)addr, TARGET_PAGE_SIZE, 
1469
                                     p->prot & ~PROT_WRITE);
1470
                        }
1471
                    }
1472
                    addr += TARGET_PAGE_SIZE;
1473
                    p++;
1474
                }
1475
            }
1476
        }
1477
    }
1478
#endif
1479
}
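
/* Sketch of how a device model (e.g. a display adapter) is expected to use
   the dirty page tracking: scan for dirty pages, refresh, then re-arm write
   detection with cpu_physical_memory_reset_dirty().  The address range is
   made up for the example. */
#if 0
static void dirty_tracking_usage_example(void)
{
    target_ulong start = 0xa0000, end = 0xc0000;
    target_ulong addr;

    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_is_dirty(addr)) {
            /* refresh whatever is backed by this page */
        }
    }
    cpu_physical_memory_reset_dirty(start, end);
}
#endif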
1480

    
1481
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, 
1482
                                    unsigned long start)
1483
{
1484
    unsigned long addr;
1485
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1486
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1487
        if (addr == start) {
1488
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
1489
        }
1490
    }
1491
}
1492

    
1493
/* update the TLB corresponding to virtual page vaddr and phys addr
1494
   addr so that it is no longer dirty */
1495
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
1496
{
1497
    CPUState *env = cpu_single_env;
1498
    int i;
1499

    
1500
    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;
1501

    
1502
    addr &= TARGET_PAGE_MASK;
1503
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1504
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
1505
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
1506
}
1507

    
1508
/* add a new TLB entry. At most one entry for a given virtual address
1509
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1510
   (can only happen in non SOFTMMU mode for I/O pages or pages
1511
   conflicting with the host address space). */
1512
int tlb_set_page(CPUState *env, target_ulong vaddr, 
1513
                 target_phys_addr_t paddr, int prot, 
1514
                 int is_user, int is_softmmu)
1515
{
1516
    PhysPageDesc *p;
1517
    unsigned long pd;
1518
    TranslationBlock *first_tb;
1519
    unsigned int index;
1520
    target_ulong address;
1521
    unsigned long addend;
1522
    int ret;
1523

    
1524
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1525
    first_tb = NULL;
1526
    if (!p) {
1527
        pd = IO_MEM_UNASSIGNED;
1528
    } else {
1529
        PageDesc *p1;
1530
        pd = p->phys_offset;
1531
        if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1532
            /* NOTE: we also allocate the page at this stage */
1533
            p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
1534
            first_tb = p1->first_tb;
1535
        }
1536
    }
1537
#if defined(DEBUG_TLB)
1538
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
1539
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
1540
#endif
1541

    
1542
    ret = 0;
1543
#if !defined(CONFIG_SOFTMMU)
1544
    if (is_softmmu) 
1545
#endif
1546
    {
1547
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1548
            /* IO memory case */
1549
            address = vaddr | pd;
1550
            addend = paddr;
1551
        } else {
1552
            /* standard memory */
1553
            address = vaddr;
1554
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1555
        }
1556
        
1557
        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
1558
        addend -= vaddr;
1559
        if (prot & PAGE_READ) {
1560
            env->tlb_read[is_user][index].address = address;
1561
            env->tlb_read[is_user][index].addend = addend;
1562
        } else {
1563
            env->tlb_read[is_user][index].address = -1;
1564
            env->tlb_read[is_user][index].addend = -1;
1565
        }
1566
        if (prot & PAGE_WRITE) {
1567
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
1568
                /* ROM: access is ignored (same as unassigned) */
1569
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
1570
                env->tlb_write[is_user][index].addend = addend;
1571
            } else 
1572
                /* XXX: the PowerPC code seems not ready to handle
1573
                   self modifying code with DCBI */
1574
#if defined(TARGET_HAS_SMC) || 1
1575
            if (first_tb) {
1576
                /* if code is present, we use a specific memory
1577
                   handler. It works only for physical memory access */
1578
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
1579
                env->tlb_write[is_user][index].addend = addend;
1580
            } else 
1581
#endif
1582
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && 
1583
                       !cpu_physical_memory_is_dirty(pd)) {
1584
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
1585
                env->tlb_write[is_user][index].addend = addend;
1586
            } else {
1587
                env->tlb_write[is_user][index].address = address;
1588
                env->tlb_write[is_user][index].addend = addend;
1589
            }
1590
        } else {
1591
            env->tlb_write[is_user][index].address = -1;
1592
            env->tlb_write[is_user][index].addend = -1;
1593
        }
1594
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we behave as if code was inside */
                        /* if code is present, we map it read-only and save
                           the original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
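
/* illustrative only: a target CPU's MMU fault handler would typically
   install a mapping along these lines (the local variable names here are
   hypothetical):

       ret = tlb_set_page(env, vaddr & TARGET_PAGE_MASK,
                          paddr & TARGET_PAGE_MASK,
                          prot, is_user, is_softmmu);

   where 'prot' is a combination of PAGE_READ, PAGE_WRITE and PAGE_EXEC
   derived from the guest page tables. */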

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
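
/* illustrative output of page_dump() (made-up values):

       start    end      size     prot
       08048000-080b7000 0006f000 r-x
       080b7000-080c0000 00009000 rw-
*/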

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code inside if
   necessary. The flag PAGE_WRITE_ORG is set automatically when
   PAGE_WRITE is set */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the page was not writable, becomes writable and contains
           translated code, invalidate the code inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
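
/* hypothetical usage sketch: after the user-mode emulation code maps a
   guest region, it would record the guest protection like this, using
   the PAGE_* flags tested elsewhere in this file:

       page_set_flags(start, start + len,
                      PAGE_VALID | PAGE_READ | PAGE_WRITE | PAGE_EXEC);
*/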

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    unsigned long addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
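
/* usage sketch (illustrative only, addresses and device names are
   made up): a machine model registers its RAM and then maps a device
   region returned by cpu_register_io_memory():

       cpu_register_physical_memory(0x00000000, ram_size, IO_MEM_RAM);
       io = cpu_register_io_memory(0, mydev_read, mydev_write, mydev_state);
       cpu_register_physical_memory(0xfc000000, 0x1000, io);
*/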

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

/* self-modifying code support in soft MMU mode: writes to a page
   containing code come to these functions */
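
/* NOTE: 'addr' as received here is a host pointer into the phys_ram_base
   area (the softmmu write path has already added the TLB addend), which
   is why the guest physical offset is recovered by subtracting
   phys_ram_base before invalidating TBs and setting the dirty bit. */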

static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 1);
#endif
    stb_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2);
#endif
    stw_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4);
#endif
    stl_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stb_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stw_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stl_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
}
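
/* NOTE: io_mem_nb starts at 5 because io_index 0 is plain RAM (no
   handler) and the four handlers registered above fill the remaining
   reserved slots; cpu_register_io_memory() hands out dynamic indices
   from there on. */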

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
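
/* The returned value encodes the io_index in the low bits of a page
   offset, so it can be stored directly in PhysPageDesc.phys_offset and
   recovered later with (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1),
   as done in cpu_physical_memory_rw() and the ld/st helpers below. */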

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* copy only up to the end of the current page */
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
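
/* In user mode the guest "physical" address space is simply the host
   virtual address space, which is why 'addr' can be used above as a
   host pointer for the copy. */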

/* never used */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    return 0;
}

void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 1;
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != 0) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

/* warning: addr must be aligned */
/* XXX: optimize code invalidation test */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != 0) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        /* invalidate code */
        tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
        /* set dirty bit */
        phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 1;
    }
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
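
/* Illustrative use (hypothetical 'pc' and buffer): a debugger stub can
   read guest virtual memory this way; a negative return value means the
   page was not mapped:

       uint8_t buf[16];
       if (cpu_memory_rw_debug(env, pc, buf, sizeof(buf), 0) < 0)
           return -1;
*/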

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count            %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
}
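
/* The inclusions below instantiate the "code access" variants of the
   softmmu load helpers (MMUSUFFIX _cmmu, one inclusion per operand
   size); they are what the translators use to fetch guest instructions
   through the soft MMU. */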

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif