1
/*
2
 *  virtual page mapping and translated block handling
3
 * 
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include "config.h"
21
#ifdef _WIN32
22
#include <windows.h>
23
#else
24
#include <sys/types.h>
25
#include <sys/mman.h>
26
#endif
27
#include <stdlib.h>
28
#include <stdio.h>
29
#include <stdarg.h>
30
#include <string.h>
31
#include <errno.h>
32
#include <unistd.h>
33
#include <inttypes.h>
34

    
35
#include "cpu.h"
36
#include "exec-all.h"
37

    
38
//#define DEBUG_TB_INVALIDATE
39
//#define DEBUG_FLUSH
40
//#define DEBUG_TLB
41

    
42
/* make various TB consistency checks */
43
//#define DEBUG_TB_CHECK 
44
//#define DEBUG_TLB_CHECK 
45

    
46
/* threshold to flush the translated code buffer */
47
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
48

    
49
#define SMC_BITMAP_USE_THRESHOLD 10
50

    
51
#define MMAP_AREA_START        0x00000000
52
#define MMAP_AREA_END          0xa8000000
53

    
54
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
55
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
56
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
57
int nb_tbs;
58
/* any access to the tbs or the page table must use this lock */
59
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
60

    
61
uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
62
uint8_t *code_gen_ptr;
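/* code_gen_ptr is the allocation cursor inside code_gen_buffer: translated
   host code is emitted at this position, and page_init() below makes the
   buffer writable and executable. */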
63

    
64
int phys_ram_size;
65
int phys_ram_fd;
66
uint8_t *phys_ram_base;
67
uint8_t *phys_ram_dirty;
68

    
69
typedef struct PageDesc {
70
    /* list of TBs intersecting this ram page */
71
    TranslationBlock *first_tb;
72
    /* in order to optimize self-modifying code handling, we count the number
73
       of write accesses to a given page before switching to a bitmap */
74
    unsigned int code_write_count;
75
    uint8_t *code_bitmap;
76
#if defined(CONFIG_USER_ONLY)
77
    unsigned long flags;
78
#endif
79
} PageDesc;
80

    
81
typedef struct PhysPageDesc {
82
    /* offset in host memory of the page + io_index in the low 12 bits */
83
    unsigned long phys_offset;
84
} PhysPageDesc;
85

    
86
typedef struct VirtPageDesc {
87
    /* physical address of code page. It is valid only if 'valid_tag'
88
       matches 'virt_valid_tag' */ 
89
    target_ulong phys_addr; 
90
    unsigned int valid_tag;
91
#if !defined(CONFIG_SOFTMMU)
92
    /* original page access rights. It is valid only if 'valid_tag'
93
       matches 'virt_valid_tag' */
94
    unsigned int prot;
95
#endif
96
} VirtPageDesc;
97

    
98
#define L2_BITS 10
99
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
100

    
101
#define L1_SIZE (1 << L1_BITS)
102
#define L2_SIZE (1 << L2_BITS)
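/* A page index is looked up in two levels: the top L1_BITS select an entry
   in l1_map/l1_phys_map and the low L2_BITS select an entry in a lazily
   allocated second-level table.  With 4 KB target pages (TARGET_PAGE_BITS
   == 12) each level has 1024 entries. */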
103

    
104
static void io_mem_init(void);
105

    
106
unsigned long qemu_real_host_page_size;
107
unsigned long qemu_host_page_bits;
108
unsigned long qemu_host_page_size;
109
unsigned long qemu_host_page_mask;
110

    
111
/* XXX: for system emulation, it could just be an array */
112
static PageDesc *l1_map[L1_SIZE];
113
static PhysPageDesc *l1_phys_map[L1_SIZE];
114

    
115
#if !defined(CONFIG_USER_ONLY)
116
static VirtPageDesc *l1_virt_map[L1_SIZE];
117
static unsigned int virt_valid_tag;
118
#endif
119

    
120
/* io memory support */
121
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
122
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
123
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
124
static int io_mem_nb;
125

    
126
/* log support */
127
char *logfilename = "/tmp/qemu.log";
128
FILE *logfile;
129
int loglevel;
130

    
131
static void page_init(void)
132
{
133
    /* NOTE: we can always suppose that qemu_host_page_size >=
134
       TARGET_PAGE_SIZE */
135
#ifdef _WIN32
136
    {
137
        SYSTEM_INFO system_info;
138
        DWORD old_protect;
139
        
140
        GetSystemInfo(&system_info);
141
        qemu_real_host_page_size = system_info.dwPageSize;
142
        
143
        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
144
                       PAGE_EXECUTE_READWRITE, &old_protect);
145
    }
146
#else
147
    qemu_real_host_page_size = getpagesize();
148
    {
149
        unsigned long start, end;
150

    
151
        start = (unsigned long)code_gen_buffer;
152
        start &= ~(qemu_real_host_page_size - 1);
153
        
154
        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
155
        end += qemu_real_host_page_size - 1;
156
        end &= ~(qemu_real_host_page_size - 1);
157
        
158
        mprotect((void *)start, end - start, 
159
                 PROT_READ | PROT_WRITE | PROT_EXEC);
160
    }
161
#endif
162

    
163
    if (qemu_host_page_size == 0)
164
        qemu_host_page_size = qemu_real_host_page_size;
165
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
166
        qemu_host_page_size = TARGET_PAGE_SIZE;
167
    qemu_host_page_bits = 0;
168
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
169
        qemu_host_page_bits++;
170
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
171
#if !defined(CONFIG_USER_ONLY)
172
    virt_valid_tag = 1;
173
#endif
174
}
175

    
176
static inline PageDesc *page_find_alloc(unsigned int index)
177
{
178
    PageDesc **lp, *p;
179

    
180
    lp = &l1_map[index >> L2_BITS];
181
    p = *lp;
182
    if (!p) {
183
        /* allocate if not found */
184
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
185
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
186
        *lp = p;
187
    }
188
    return p + (index & (L2_SIZE - 1));
189
}
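/* page_find() below is the lookup-only variant: it never allocates and
   returns NULL when the second-level table has not been created yet. */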
190

    
191
static inline PageDesc *page_find(unsigned int index)
192
{
193
    PageDesc *p;
194

    
195
    p = l1_map[index >> L2_BITS];
196
    if (!p)
197
        return 0;
198
    return p + (index & (L2_SIZE - 1));
199
}
200

    
201
static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
202
{
203
    PhysPageDesc **lp, *p;
204

    
205
    lp = &l1_phys_map[index >> L2_BITS];
206
    p = *lp;
207
    if (!p) {
208
        /* allocate if not found */
209
        p = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
210
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
211
        *lp = p;
212
    }
213
    return p + (index & (L2_SIZE - 1));
214
}
215

    
216
static inline PhysPageDesc *phys_page_find(unsigned int index)
217
{
218
    PhysPageDesc *p;
219

    
220
    p = l1_phys_map[index >> L2_BITS];
221
    if (!p)
222
        return 0;
223
    return p + (index & (L2_SIZE - 1));
224
}
225

    
226
#if !defined(CONFIG_USER_ONLY)
227
static void tlb_protect_code(CPUState *env, target_ulong addr);
228
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);
229

    
230
static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
231
{
232
    VirtPageDesc **lp, *p;
233

    
234
    /* XXX: should not truncate for 64 bit addresses */
235
#if TARGET_LONG_BITS > 32
236
    index &= (L1_SIZE - 1);
237
#endif
238
    lp = &l1_virt_map[index >> L2_BITS];
239
    p = *lp;
240
    if (!p) {
241
        /* allocate if not found */
242
        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
243
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
244
        *lp = p;
245
    }
246
    return p + (index & (L2_SIZE - 1));
247
}
248

    
249
static inline VirtPageDesc *virt_page_find(unsigned int index)
250
{
251
    VirtPageDesc *p;
252

    
253
    p = l1_virt_map[index >> L2_BITS];
254
    if (!p)
255
        return 0;
256
    return p + (index & (L2_SIZE - 1));
257
}
258

    
259
static void virt_page_flush(void)
260
{
261
    int i, j;
262
    VirtPageDesc *p;
263
    
264
    virt_valid_tag++;
265

    
266
    if (virt_valid_tag == 0) {
267
        virt_valid_tag = 1;
268
        for(i = 0; i < L1_SIZE; i++) {
269
            p = l1_virt_map[i];
270
            if (p) {
271
                for(j = 0; j < L2_SIZE; j++)
272
                    p[j].valid_tag = 0;
273
            }
274
        }
275
    }
276
}
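/* Incrementing virt_valid_tag invalidates every VirtPageDesc at once, since
   entries are only trusted when their valid_tag matches the current tag; the
   explicit table walk above is only needed when the counter wraps to 0. */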
277
#else
278
static void virt_page_flush(void)
279
{
280
}
281
#endif
282

    
283
void cpu_exec_init(void)
284
{
285
    if (!code_gen_ptr) {
286
        code_gen_ptr = code_gen_buffer;
287
        page_init();
288
        io_mem_init();
289
    }
290
}
291

    
292
static inline void invalidate_page_bitmap(PageDesc *p)
293
{
294
    if (p->code_bitmap) {
295
        qemu_free(p->code_bitmap);
296
        p->code_bitmap = NULL;
297
    }
298
    p->code_write_count = 0;
299
}
300

    
301
/* set to NULL all the 'first_tb' fields in all PageDescs */
302
static void page_flush_tb(void)
303
{
304
    int i, j;
305
    PageDesc *p;
306

    
307
    for(i = 0; i < L1_SIZE; i++) {
308
        p = l1_map[i];
309
        if (p) {
310
            for(j = 0; j < L2_SIZE; j++) {
311
                p->first_tb = NULL;
312
                invalidate_page_bitmap(p);
313
                p++;
314
            }
315
        }
316
    }
317
}
318

    
319
/* flush all the translation blocks */
320
/* XXX: tb_flush is currently not thread safe */
321
void tb_flush(CPUState *env)
322
{
323
#if defined(DEBUG_FLUSH)
324
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n", 
325
           code_gen_ptr - code_gen_buffer, 
326
           nb_tbs, 
327
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
328
#endif
329
    nb_tbs = 0;
330
    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
331
    virt_page_flush();
332

    
333
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
334
    page_flush_tb();
335

    
336
    code_gen_ptr = code_gen_buffer;
337
    /* XXX: flush processor icache at this point if cache flush is
338
       expensive */
339
}
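/* Note that tb_flush() frees nothing: it simply resets nb_tbs and
   code_gen_ptr so that the static tbs[] array and code_gen_buffer are
   reused from the start. */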
340

    
341
#ifdef DEBUG_TB_CHECK
342

    
343
static void tb_invalidate_check(unsigned long address)
344
{
345
    TranslationBlock *tb;
346
    int i;
347
    address &= TARGET_PAGE_MASK;
348
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
349
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
350
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
351
                  address >= tb->pc + tb->size)) {
352
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
353
                       address, tb->pc, tb->size);
354
            }
355
        }
356
    }
357
}
358

    
359
/* verify that all the pages have correct rights for code */
360
static void tb_page_check(void)
361
{
362
    TranslationBlock *tb;
363
    int i, flags1, flags2;
364
    
365
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
366
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
367
            flags1 = page_get_flags(tb->pc);
368
            flags2 = page_get_flags(tb->pc + tb->size - 1);
369
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
370
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
371
                       tb->pc, tb->size, flags1, flags2);
372
            }
373
        }
374
    }
375
}
376

    
377
void tb_jmp_check(TranslationBlock *tb)
378
{
379
    TranslationBlock *tb1;
380
    unsigned int n1;
381

    
382
    /* suppress any remaining jumps to this TB */
383
    tb1 = tb->jmp_first;
384
    for(;;) {
385
        n1 = (long)tb1 & 3;
386
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
387
        if (n1 == 2)
388
            break;
389
        tb1 = tb1->jmp_next[n1];
390
    }
391
    /* check end of list */
392
    if (tb1 != tb) {
393
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
394
    }
395
}
396

    
397
#endif
398

    
399
/* invalidate one TB */
400
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
401
                             int next_offset)
402
{
403
    TranslationBlock *tb1;
404
    for(;;) {
405
        tb1 = *ptb;
406
        if (tb1 == tb) {
407
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
408
            break;
409
        }
410
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
411
    }
412
}
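/* The page and jump lists store extra state in the two low bits of each
   TranslationBlock pointer: the value 0 or 1 is the slot index 'n' within
   the TB, and 2 marks the head of the circular jmp_first list.  This is why
   the code below keeps masking with '(long)tb & 3' and '(long)tb & ~3'. */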
413

    
414
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
415
{
416
    TranslationBlock *tb1;
417
    unsigned int n1;
418

    
419
    for(;;) {
420
        tb1 = *ptb;
421
        n1 = (long)tb1 & 3;
422
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
423
        if (tb1 == tb) {
424
            *ptb = tb1->page_next[n1];
425
            break;
426
        }
427
        ptb = &tb1->page_next[n1];
428
    }
429
}
430

    
431
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
432
{
433
    TranslationBlock *tb1, **ptb;
434
    unsigned int n1;
435

    
436
    ptb = &tb->jmp_next[n];
437
    tb1 = *ptb;
438
    if (tb1) {
439
        /* find tb(n) in circular list */
440
        for(;;) {
441
            tb1 = *ptb;
442
            n1 = (long)tb1 & 3;
443
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
444
            if (n1 == n && tb1 == tb)
445
                break;
446
            if (n1 == 2) {
447
                ptb = &tb1->jmp_first;
448
            } else {
449
                ptb = &tb1->jmp_next[n1];
450
            }
451
        }
452
        /* now we can suppress tb(n) from the list */
453
        *ptb = tb->jmp_next[n];
454

    
455
        tb->jmp_next[n] = NULL;
456
    }
457
}
458

    
459
/* reset the jump entry 'n' of a TB so that it is not chained to
460
   another TB */
461
static inline void tb_reset_jump(TranslationBlock *tb, int n)
462
{
463
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
464
}
465

    
466
static inline void tb_invalidate(TranslationBlock *tb)
467
{
468
    unsigned int h, n1;
469
    TranslationBlock *tb1, *tb2, **ptb;
470
    
471
    tb_invalidated_flag = 1;
472

    
473
    /* remove the TB from the hash list */
474
    h = tb_hash_func(tb->pc);
475
    ptb = &tb_hash[h];
476
    for(;;) {
477
        tb1 = *ptb;
478
        /* NOTE: the TB is not necessarily linked in the hash; if it is
479
           not found there, it is simply not currently in use */
480
        if (tb1 == NULL)
481
            return;
482
        if (tb1 == tb) {
483
            *ptb = tb1->hash_next;
484
            break;
485
        }
486
        ptb = &tb1->hash_next;
487
    }
488

    
489
    /* suppress this TB from the two jump lists */
490
    tb_jmp_remove(tb, 0);
491
    tb_jmp_remove(tb, 1);
492

    
493
    /* suppress any remaining jumps to this TB */
494
    tb1 = tb->jmp_first;
495
    for(;;) {
496
        n1 = (long)tb1 & 3;
497
        if (n1 == 2)
498
            break;
499
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
500
        tb2 = tb1->jmp_next[n1];
501
        tb_reset_jump(tb1, n1);
502
        tb1->jmp_next[n1] = NULL;
503
        tb1 = tb2;
504
    }
505
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
506
}
507

    
508
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
509
{
510
    PageDesc *p;
511
    unsigned int h;
512
    target_ulong phys_pc;
513
    
514
    /* remove the TB from the hash list */
515
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
516
    h = tb_phys_hash_func(phys_pc);
517
    tb_remove(&tb_phys_hash[h], tb, 
518
              offsetof(TranslationBlock, phys_hash_next));
519

    
520
    /* remove the TB from the page list */
521
    if (tb->page_addr[0] != page_addr) {
522
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
523
        tb_page_remove(&p->first_tb, tb);
524
        invalidate_page_bitmap(p);
525
    }
526
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
527
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
528
        tb_page_remove(&p->first_tb, tb);
529
        invalidate_page_bitmap(p);
530
    }
531

    
532
    tb_invalidate(tb);
533
}
534

    
535
static inline void set_bits(uint8_t *tab, int start, int len)
536
{
537
    int end, mask, end1;
538

    
539
    end = start + len;
540
    tab += start >> 3;
541
    mask = 0xff << (start & 7);
542
    if ((start & ~7) == (end & ~7)) {
543
        if (start < end) {
544
            mask &= ~(0xff << (end & 7));
545
            *tab |= mask;
546
        }
547
    } else {
548
        *tab++ |= mask;
549
        start = (start + 8) & ~7;
550
        end1 = end & ~7;
551
        while (start < end1) {
552
            *tab++ = 0xff;
553
            start += 8;
554
        }
555
        if (start < end) {
556
            mask = ~(0xff << (end & 7));
557
            *tab |= mask;
558
        }
559
    }
560
}
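/* build_page_bitmap() sets one bit per byte of the target page for every
   byte covered by a TB.  tb_invalidate_phys_page_fast() consults this
   bitmap so that guest writes which touch no translated code can skip the
   full invalidation pass. */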
561

    
562
static void build_page_bitmap(PageDesc *p)
563
{
564
    int n, tb_start, tb_end;
565
    TranslationBlock *tb;
566
    
567
    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
568
    if (!p->code_bitmap)
569
        return;
570
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
571

    
572
    tb = p->first_tb;
573
    while (tb != NULL) {
574
        n = (long)tb & 3;
575
        tb = (TranslationBlock *)((long)tb & ~3);
576
        /* NOTE: this is subtle as a TB may span two physical pages */
577
        if (n == 0) {
578
            /* NOTE: tb_end may be after the end of the page, but
579
               it is not a problem */
580
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
581
            tb_end = tb_start + tb->size;
582
            if (tb_end > TARGET_PAGE_SIZE)
583
                tb_end = TARGET_PAGE_SIZE;
584
        } else {
585
            tb_start = 0;
586
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
587
        }
588
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
589
        tb = tb->page_next[n];
590
    }
591
}
592

    
593
#ifdef TARGET_HAS_PRECISE_SMC
594

    
595
static void tb_gen_code(CPUState *env, 
596
                        target_ulong pc, target_ulong cs_base, int flags,
597
                        int cflags)
598
{
599
    TranslationBlock *tb;
600
    uint8_t *tc_ptr;
601
    target_ulong phys_pc, phys_page2, virt_page2;
602
    int code_gen_size;
603

    
604
    phys_pc = get_phys_addr_code(env, pc);
605
    tb = tb_alloc(pc);
606
    if (!tb) {
607
        /* flush must be done */
608
        tb_flush(env);
609
        /* cannot fail at this point */
610
        tb = tb_alloc(pc);
611
    }
612
    tc_ptr = code_gen_ptr;
613
    tb->tc_ptr = tc_ptr;
614
    tb->cs_base = cs_base;
615
    tb->flags = flags;
616
    tb->cflags = cflags;
617
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
618
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
619
    
620
    /* check next page if needed */
621
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
622
    phys_page2 = -1;
623
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
624
        phys_page2 = get_phys_addr_code(env, virt_page2);
625
    }
626
    tb_link_phys(tb, phys_pc, phys_page2);
627
}
628
#endif
629
    
630
/* invalidate all TBs which intersect with the target physical page
631
   starting in range [start, end). NOTE: start and end must refer to
632
   the same physical page. 'is_cpu_write_access' should be true if called
633
   from a real cpu write access: the virtual CPU will exit the current
634
   TB if code is modified inside this TB. */
635
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end, 
636
                                   int is_cpu_write_access)
637
{
638
    int n, current_tb_modified, current_tb_not_found, current_flags;
639
    CPUState *env = cpu_single_env;
640
    PageDesc *p;
641
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
642
    target_ulong tb_start, tb_end;
643
    target_ulong current_pc, current_cs_base;
644

    
645
    p = page_find(start >> TARGET_PAGE_BITS);
646
    if (!p) 
647
        return;
648
    if (!p->code_bitmap && 
649
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
650
        is_cpu_write_access) {
651
        /* build code bitmap */
652
        build_page_bitmap(p);
653
    }
654

    
655
    /* we remove all the TBs in the range [start, end[ */
656
    /* XXX: see if in some cases it could be faster to invalidate all the code */
657
    current_tb_not_found = is_cpu_write_access;
658
    current_tb_modified = 0;
659
    current_tb = NULL; /* avoid warning */
660
    current_pc = 0; /* avoid warning */
661
    current_cs_base = 0; /* avoid warning */
662
    current_flags = 0; /* avoid warning */
663
    tb = p->first_tb;
664
    while (tb != NULL) {
665
        n = (long)tb & 3;
666
        tb = (TranslationBlock *)((long)tb & ~3);
667
        tb_next = tb->page_next[n];
668
        /* NOTE: this is subtle as a TB may span two physical pages */
669
        if (n == 0) {
670
            /* NOTE: tb_end may be after the end of the page, but
671
               it is not a problem */
672
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
673
            tb_end = tb_start + tb->size;
674
        } else {
675
            tb_start = tb->page_addr[1];
676
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
677
        }
678
        if (!(tb_end <= start || tb_start >= end)) {
679
#ifdef TARGET_HAS_PRECISE_SMC
680
            if (current_tb_not_found) {
681
                current_tb_not_found = 0;
682
                current_tb = NULL;
683
                if (env->mem_write_pc) {
684
                    /* now we have a real cpu fault */
685
                    current_tb = tb_find_pc(env->mem_write_pc);
686
                }
687
            }
688
            if (current_tb == tb &&
689
                !(current_tb->cflags & CF_SINGLE_INSN)) {
690
                /* If we are modifying the current TB, we must stop
691
                its execution. We could be more precise by checking
692
                that the modification is after the current PC, but it
693
                would require a specialized function to partially
694
                restore the CPU state */
695
                
696
                current_tb_modified = 1;
697
                cpu_restore_state(current_tb, env, 
698
                                  env->mem_write_pc, NULL);
699
#if defined(TARGET_I386)
700
                current_flags = env->hflags;
701
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
702
                current_cs_base = (target_ulong)env->segs[R_CS].base;
703
                current_pc = current_cs_base + env->eip;
704
#else
705
#error unsupported CPU
706
#endif
707
            }
708
#endif /* TARGET_HAS_PRECISE_SMC */
709
            saved_tb = env->current_tb;
710
            env->current_tb = NULL;
711
            tb_phys_invalidate(tb, -1);
712
            env->current_tb = saved_tb;
713
            if (env->interrupt_request && env->current_tb)
714
                cpu_interrupt(env, env->interrupt_request);
715
        }
716
        tb = tb_next;
717
    }
718
#if !defined(CONFIG_USER_ONLY)
719
    /* if no code remaining, no need to continue to use slow writes */
720
    if (!p->first_tb) {
721
        invalidate_page_bitmap(p);
722
        if (is_cpu_write_access) {
723
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
724
        }
725
    }
726
#endif
727
#ifdef TARGET_HAS_PRECISE_SMC
728
    if (current_tb_modified) {
729
        /* we generate a block containing just the instruction
730
           modifying the memory. It will ensure that it cannot modify
731
           itself */
732
        env->current_tb = NULL;
733
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 
734
                    CF_SINGLE_INSN);
735
        cpu_resume_from_signal(env, NULL);
736
    }
737
#endif
738
}
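/* Fast path for small writes (len <= 8): if a code bitmap exists, test the
   'len' bits starting at the write offset and only fall back to the full
   range invalidation when at least one of them is set. */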
739

    
740
/* len must be <= 8 and start must be a multiple of len */
741
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
742
{
743
    PageDesc *p;
744
    int offset, b;
745
#if 0
746
    if (1) {
747
        if (loglevel) {
748
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n", 
749
                   cpu_single_env->mem_write_vaddr, len, 
750
                   cpu_single_env->eip, 
751
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
752
        }
753
    }
754
#endif
755
    p = page_find(start >> TARGET_PAGE_BITS);
756
    if (!p) 
757
        return;
758
    if (p->code_bitmap) {
759
        offset = start & ~TARGET_PAGE_MASK;
760
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
761
        if (b & ((1 << len) - 1))
762
            goto do_invalidate;
763
    } else {
764
    do_invalidate:
765
        tb_invalidate_phys_page_range(start, start + len, 1);
766
    }
767
}
768

    
769
#if !defined(CONFIG_SOFTMMU)
770
static void tb_invalidate_phys_page(target_ulong addr, 
771
                                    unsigned long pc, void *puc)
772
{
773
    int n, current_flags, current_tb_modified;
774
    target_ulong current_pc, current_cs_base;
775
    PageDesc *p;
776
    TranslationBlock *tb, *current_tb;
777
#ifdef TARGET_HAS_PRECISE_SMC
778
    CPUState *env = cpu_single_env;
779
#endif
780

    
781
    addr &= TARGET_PAGE_MASK;
782
    p = page_find(addr >> TARGET_PAGE_BITS);
783
    if (!p) 
784
        return;
785
    tb = p->first_tb;
786
    current_tb_modified = 0;
787
    current_tb = NULL;
788
    current_pc = 0; /* avoid warning */
789
    current_cs_base = 0; /* avoid warning */
790
    current_flags = 0; /* avoid warning */
791
#ifdef TARGET_HAS_PRECISE_SMC
792
    if (tb && pc != 0) {
793
        current_tb = tb_find_pc(pc);
794
    }
795
#endif
796
    while (tb != NULL) {
797
        n = (long)tb & 3;
798
        tb = (TranslationBlock *)((long)tb & ~3);
799
#ifdef TARGET_HAS_PRECISE_SMC
800
        if (current_tb == tb &&
801
            !(current_tb->cflags & CF_SINGLE_INSN)) {
802
                /* If we are modifying the current TB, we must stop
803
                   its execution. We could be more precise by checking
804
                   that the modification is after the current PC, but it
805
                   would require a specialized function to partially
806
                   restore the CPU state */
807
            
808
            current_tb_modified = 1;
809
            cpu_restore_state(current_tb, env, pc, puc);
810
#if defined(TARGET_I386)
811
            current_flags = env->hflags;
812
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
813
            current_cs_base = (target_ulong)env->segs[R_CS].base;
814
            current_pc = current_cs_base + env->eip;
815
#else
816
#error unsupported CPU
817
#endif
818
        }
819
#endif /* TARGET_HAS_PRECISE_SMC */
820
        tb_phys_invalidate(tb, addr);
821
        tb = tb->page_next[n];
822
    }
823
    p->first_tb = NULL;
824
#ifdef TARGET_HAS_PRECISE_SMC
825
    if (current_tb_modified) {
826
        /* we generate a block containing just the instruction
827
           modifying the memory. It will ensure that it cannot modify
828
           itself */
829
        env->current_tb = NULL;
830
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 
831
                    CF_SINGLE_INSN);
832
        cpu_resume_from_signal(env, puc);
833
    }
834
#endif
835
}
836
#endif
837

    
838
/* add the tb in the target page and protect it if necessary */
839
static inline void tb_alloc_page(TranslationBlock *tb, 
840
                                 unsigned int n, unsigned int page_addr)
841
{
842
    PageDesc *p;
843
    TranslationBlock *last_first_tb;
844

    
845
    tb->page_addr[n] = page_addr;
846
    p = page_find(page_addr >> TARGET_PAGE_BITS);
847
    tb->page_next[n] = p->first_tb;
848
    last_first_tb = p->first_tb;
849
    p->first_tb = (TranslationBlock *)((long)tb | n);
850
    invalidate_page_bitmap(p);
851

    
852
#if defined(TARGET_HAS_SMC) || 1
853

    
854
#if defined(CONFIG_USER_ONLY)
855
    if (p->flags & PAGE_WRITE) {
856
        unsigned long host_start, host_end, addr;
857
        int prot;
858

    
859
        /* force the host page to be non-writable (writes will have a
860
           page fault + mprotect overhead) */
861
        host_start = page_addr & qemu_host_page_mask;
862
        host_end = host_start + qemu_host_page_size;
863
        prot = 0;
864
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
865
            prot |= page_get_flags(addr);
866
        mprotect((void *)host_start, qemu_host_page_size, 
867
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
868
#ifdef DEBUG_TB_INVALIDATE
869
        printf("protecting code page: 0x%08lx\n", 
870
               host_start);
871
#endif
872
        p->flags &= ~PAGE_WRITE;
873
    }
874
#else
875
    /* if some code is already present, then the pages are already
876
       protected. So we handle the case where only the first TB is
877
       allocated in a physical page */
878
    if (!last_first_tb) {
879
        target_ulong virt_addr;
880

    
881
        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
882
        tlb_protect_code(cpu_single_env, virt_addr);        
883
    }
884
#endif
885

    
886
#endif /* TARGET_HAS_SMC */
887
}
888

    
889
/* Allocate a new translation block. Flush the translation buffer if
890
   too many translation blocks or too much generated code. */
891
TranslationBlock *tb_alloc(target_ulong pc)
892
{
893
    TranslationBlock *tb;
894

    
895
    if (nb_tbs >= CODE_GEN_MAX_BLOCKS || 
896
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
897
        return NULL;
898
    tb = &tbs[nb_tbs++];
899
    tb->pc = pc;
900
    tb->cflags = 0;
901
    return tb;
902
}
903

    
904
/* add a new TB and link it to the physical page tables. phys_page2 is
905
   (-1) to indicate that only one page contains the TB. */
906
void tb_link_phys(TranslationBlock *tb, 
907
                  target_ulong phys_pc, target_ulong phys_page2)
908
{
909
    unsigned int h;
910
    TranslationBlock **ptb;
911

    
912
    /* add in the physical hash table */
913
    h = tb_phys_hash_func(phys_pc);
914
    ptb = &tb_phys_hash[h];
915
    tb->phys_hash_next = *ptb;
916
    *ptb = tb;
917

    
918
    /* add in the page list */
919
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
920
    if (phys_page2 != -1)
921
        tb_alloc_page(tb, 1, phys_page2);
922
    else
923
        tb->page_addr[1] = -1;
924
#ifdef DEBUG_TB_CHECK
925
    tb_page_check();
926
#endif
927
}
928

    
929
/* link the tb with the other TBs */
930
void tb_link(TranslationBlock *tb)
931
{
932
#if !defined(CONFIG_USER_ONLY)
933
    {
934
        VirtPageDesc *vp;
935
        target_ulong addr;
936
        
937
        /* save the code memory mappings (needed to invalidate the code) */
938
        addr = tb->pc & TARGET_PAGE_MASK;
939
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
940
#ifdef DEBUG_TLB_CHECK 
941
        if (vp->valid_tag == virt_valid_tag &&
942
            vp->phys_addr != tb->page_addr[0]) {
943
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
944
                   addr, tb->page_addr[0], vp->phys_addr);
945
        }
946
#endif
947
        vp->phys_addr = tb->page_addr[0];
948
        if (vp->valid_tag != virt_valid_tag) {
949
            vp->valid_tag = virt_valid_tag;
950
#if !defined(CONFIG_SOFTMMU)
951
            vp->prot = 0;
952
#endif
953
        }
954
        
955
        if (tb->page_addr[1] != -1) {
956
            addr += TARGET_PAGE_SIZE;
957
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
958
#ifdef DEBUG_TLB_CHECK 
959
            if (vp->valid_tag == virt_valid_tag &&
960
                vp->phys_addr != tb->page_addr[1]) { 
961
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
962
                       addr, tb->page_addr[1], vp->phys_addr);
963
            }
964
#endif
965
            vp->phys_addr = tb->page_addr[1];
966
            if (vp->valid_tag != virt_valid_tag) {
967
                vp->valid_tag = virt_valid_tag;
968
#if !defined(CONFIG_SOFTMMU)
969
                vp->prot = 0;
970
#endif
971
            }
972
        }
973
    }
974
#endif
975

    
976
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
977
    tb->jmp_next[0] = NULL;
978
    tb->jmp_next[1] = NULL;
979
#ifdef USE_CODE_COPY
980
    tb->cflags &= ~CF_FP_USED;
981
    if (tb->cflags & CF_TB_FP_USED)
982
        tb->cflags |= CF_FP_USED;
983
#endif
984

    
985
    /* init original jump addresses */
986
    if (tb->tb_next_offset[0] != 0xffff)
987
        tb_reset_jump(tb, 0);
988
    if (tb->tb_next_offset[1] != 0xffff)
989
        tb_reset_jump(tb, 1);
990
}
991

    
992
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
993
   tb[1].tc_ptr. Return NULL if not found */
994
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
995
{
996
    int m_min, m_max, m;
997
    unsigned long v;
998
    TranslationBlock *tb;
999

    
1000
    if (nb_tbs <= 0)
1001
        return NULL;
1002
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1003
        tc_ptr >= (unsigned long)code_gen_ptr)
1004
        return NULL;
1005
    /* binary search (cf Knuth) */
1006
    m_min = 0;
1007
    m_max = nb_tbs - 1;
1008
    while (m_min <= m_max) {
1009
        m = (m_min + m_max) >> 1;
1010
        tb = &tbs[m];
1011
        v = (unsigned long)tb->tc_ptr;
1012
        if (v == tc_ptr)
1013
            return tb;
1014
        else if (tc_ptr < v) {
1015
            m_max = m - 1;
1016
        } else {
1017
            m_min = m + 1;
1018
        }
1019
    } 
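    /* no exact match: m_max now indexes the last TB whose tc_ptr is
       <= tc_ptr, i.e. the block that contains it */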
1020
    return &tbs[m_max];
1021
}
1022

    
1023
static void tb_reset_jump_recursive(TranslationBlock *tb);
1024

    
1025
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1026
{
1027
    TranslationBlock *tb1, *tb_next, **ptb;
1028
    unsigned int n1;
1029

    
1030
    tb1 = tb->jmp_next[n];
1031
    if (tb1 != NULL) {
1032
        /* find head of list */
1033
        for(;;) {
1034
            n1 = (long)tb1 & 3;
1035
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1036
            if (n1 == 2)
1037
                break;
1038
            tb1 = tb1->jmp_next[n1];
1039
        }
1040
        /* we are now sure that tb jumps to tb1 */
1041
        tb_next = tb1;
1042

    
1043
        /* remove tb from the jmp_first list */
1044
        ptb = &tb_next->jmp_first;
1045
        for(;;) {
1046
            tb1 = *ptb;
1047
            n1 = (long)tb1 & 3;
1048
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1049
            if (n1 == n && tb1 == tb)
1050
                break;
1051
            ptb = &tb1->jmp_next[n1];
1052
        }
1053
        *ptb = tb->jmp_next[n];
1054
        tb->jmp_next[n] = NULL;
1055
        
1056
        /* suppress the jump to next tb in generated code */
1057
        tb_reset_jump(tb, n);
1058

    
1059
        /* suppress jumps in the tb on which we could have jumped */
1060
        tb_reset_jump_recursive(tb_next);
1061
    }
1062
}
1063

    
1064
static void tb_reset_jump_recursive(TranslationBlock *tb)
1065
{
1066
    tb_reset_jump_recursive2(tb, 0);
1067
    tb_reset_jump_recursive2(tb, 1);
1068
}
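/* tb_reset_jump_recursive() unchains a TB and every TB reachable through its
   direct jumps; cpu_interrupt() relies on it so that a CPU currently running
   translated code falls back to the main loop and sees interrupt_request. */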
1069

    
1070
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
1071
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1072
{
1073
    target_ulong phys_addr;
1074

    
1075
    phys_addr = cpu_get_phys_page_debug(env, pc);
1076
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
1077
}
1078
#endif
1079

    
1080
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1081
   breakpoint is reached */
1082
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1083
{
1084
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
1085
    int i;
1086
    
1087
    for(i = 0; i < env->nb_breakpoints; i++) {
1088
        if (env->breakpoints[i] == pc)
1089
            return 0;
1090
    }
1091

    
1092
    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1093
        return -1;
1094
    env->breakpoints[env->nb_breakpoints++] = pc;
1095
    
1096
    breakpoint_invalidate(env, pc);
1097
    return 0;
1098
#else
1099
    return -1;
1100
#endif
1101
}
1102

    
1103
/* remove a breakpoint */
1104
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1105
{
1106
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
1107
    int i;
1108
    for(i = 0; i < env->nb_breakpoints; i++) {
1109
        if (env->breakpoints[i] == pc)
1110
            goto found;
1111
    }
1112
    return -1;
1113
 found:
1114
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
1115
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
1116
    env->nb_breakpoints--;
1117

    
1118
    breakpoint_invalidate(env, pc);
1119
    return 0;
1120
#else
1121
    return -1;
1122
#endif
1123
}
1124

    
1125
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1126
   CPU loop after each instruction */
1127
void cpu_single_step(CPUState *env, int enabled)
1128
{
1129
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
1130
    if (env->singlestep_enabled != enabled) {
1131
        env->singlestep_enabled = enabled;
1132
        /* must flush all the translated code to avoid inconsistencies */
1133
        /* XXX: only flush what is necessary */
1134
        tb_flush(env);
1135
    }
1136
#endif
1137
}
1138

    
1139
/* enable or disable low levels log */
1140
void cpu_set_log(int log_flags)
1141
{
1142
    loglevel = log_flags;
1143
    if (loglevel && !logfile) {
1144
        logfile = fopen(logfilename, "w");
1145
        if (!logfile) {
1146
            perror(logfilename);
1147
            _exit(1);
1148
        }
1149
#if !defined(CONFIG_SOFTMMU)
1150
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1151
        {
1152
            static uint8_t logfile_buf[4096];
1153
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1154
        }
1155
#else
1156
        setvbuf(logfile, NULL, _IOLBF, 0);
1157
#endif
1158
    }
1159
}
1160

    
1161
void cpu_set_log_filename(const char *filename)
1162
{
1163
    logfilename = strdup(filename);
1164
}
1165

    
1166
/* mask must never be zero, except for A20 change call */
1167
void cpu_interrupt(CPUState *env, int mask)
1168
{
1169
    TranslationBlock *tb;
1170
    static int interrupt_lock;
1171

    
1172
    env->interrupt_request |= mask;
1173
    /* if the cpu is currently executing code, we must unlink it and
1174
       all the potentially executing TB */
1175
    tb = env->current_tb;
1176
    if (tb && !testandset(&interrupt_lock)) {
1177
        env->current_tb = NULL;
1178
        tb_reset_jump_recursive(tb);
1179
        interrupt_lock = 0;
1180
    }
1181
}
1182

    
1183
void cpu_reset_interrupt(CPUState *env, int mask)
1184
{
1185
    env->interrupt_request &= ~mask;
1186
}
1187

    
1188
CPULogItem cpu_log_items[] = {
1189
    { CPU_LOG_TB_OUT_ASM, "out_asm", 
1190
      "show generated host assembly code for each compiled TB" },
1191
    { CPU_LOG_TB_IN_ASM, "in_asm",
1192
      "show target assembly code for each compiled TB" },
1193
    { CPU_LOG_TB_OP, "op", 
1194
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1195
#ifdef TARGET_I386
1196
    { CPU_LOG_TB_OP_OPT, "op_opt",
1197
      "show micro ops after optimization for each compiled TB" },
1198
#endif
1199
    { CPU_LOG_INT, "int",
1200
      "show interrupts/exceptions in short format" },
1201
    { CPU_LOG_EXEC, "exec",
1202
      "show trace before each executed TB (lots of logs)" },
1203
    { CPU_LOG_TB_CPU, "cpu",
1204
      "show CPU state before block translation" },
1205
#ifdef TARGET_I386
1206
    { CPU_LOG_PCALL, "pcall",
1207
      "show protected mode far calls/returns/exceptions" },
1208
#endif
1209
#ifdef DEBUG_IOPORT
1210
    { CPU_LOG_IOPORT, "ioport",
1211
      "show all i/o ports accesses" },
1212
#endif
1213
    { 0, NULL, NULL },
1214
};
1215

    
1216
static int cmp1(const char *s1, int n, const char *s2)
1217
{
1218
    if (strlen(s2) != n)
1219
        return 0;
1220
    return memcmp(s1, s2, n) == 0;
1221
}
1222
      
1223
/* takes a comma separated list of log masks. Return 0 if error. */
1224
int cpu_str_to_log_mask(const char *str)
1225
{
1226
    CPULogItem *item;
1227
    int mask;
1228
    const char *p, *p1;
1229

    
1230
    p = str;
1231
    mask = 0;
1232
    for(;;) {
1233
        p1 = strchr(p, ',');
1234
        if (!p1)
1235
            p1 = p + strlen(p);
1236
        if(cmp1(p,p1-p,"all")) {
1237
                for(item = cpu_log_items; item->mask != 0; item++) {
1238
                        mask |= item->mask;
1239
                }
1240
        } else {
1241
        for(item = cpu_log_items; item->mask != 0; item++) {
1242
            if (cmp1(p, p1 - p, item->name))
1243
                goto found;
1244
        }
1245
        return 0;
1246
        }
1247
    found:
1248
        mask |= item->mask;
1249
        if (*p1 != ',')
1250
            break;
1251
        p = p1 + 1;
1252
    }
1253
    return mask;
1254
}
1255

    
1256
void cpu_abort(CPUState *env, const char *fmt, ...)
1257
{
1258
    va_list ap;
1259

    
1260
    va_start(ap, fmt);
1261
    fprintf(stderr, "qemu: fatal: ");
1262
    vfprintf(stderr, fmt, ap);
1263
    fprintf(stderr, "\n");
1264
#ifdef TARGET_I386
1265
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1266
#else
1267
    cpu_dump_state(env, stderr, fprintf, 0);
1268
#endif
1269
    va_end(ap);
1270
    abort();
1271
}
1272

    
1273
#if !defined(CONFIG_USER_ONLY)
1274

    
1275
/* NOTE: if flush_global is true, also flush global entries (not
1276
   implemented yet) */
1277
void tlb_flush(CPUState *env, int flush_global)
1278
{
1279
    int i;
1280

    
1281
#if defined(DEBUG_TLB)
1282
    printf("tlb_flush:\n");
1283
#endif
1284
    /* must reset current TB so that interrupts cannot modify the
1285
       links while we are modifying them */
1286
    env->current_tb = NULL;
1287

    
1288
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1289
        env->tlb_read[0][i].address = -1;
1290
        env->tlb_write[0][i].address = -1;
1291
        env->tlb_read[1][i].address = -1;
1292
        env->tlb_write[1][i].address = -1;
1293
    }
1294

    
1295
    virt_page_flush();
1296
    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
1297

    
1298
#if !defined(CONFIG_SOFTMMU)
1299
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1300
#endif
1301
}
1302

    
1303
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1304
{
1305
    if (addr == (tlb_entry->address & 
1306
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
1307
        tlb_entry->address = -1;
1308
}
1309

    
1310
void tlb_flush_page(CPUState *env, target_ulong addr)
1311
{
1312
    int i, n;
1313
    VirtPageDesc *vp;
1314
    PageDesc *p;
1315
    TranslationBlock *tb;
1316

    
1317
#if defined(DEBUG_TLB)
1318
    printf("tlb_flush_page: 0x%08x\n", addr);
1319
#endif
1320
    /* must reset current TB so that interrupts cannot modify the
1321
       links while we are modifying them */
1322
    env->current_tb = NULL;
1323

    
1324
    addr &= TARGET_PAGE_MASK;
1325
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1326
    tlb_flush_entry(&env->tlb_read[0][i], addr);
1327
    tlb_flush_entry(&env->tlb_write[0][i], addr);
1328
    tlb_flush_entry(&env->tlb_read[1][i], addr);
1329
    tlb_flush_entry(&env->tlb_write[1][i], addr);
1330

    
1331
    /* remove from the virtual pc hash table all the TB at this
1332
       virtual address */
1333
    
1334
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1335
    if (vp && vp->valid_tag == virt_valid_tag) {
1336
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
1337
        if (p) {
1338
            /* we remove all the links to the TBs in this virtual page */
1339
            tb = p->first_tb;
1340
            while (tb != NULL) {
1341
                n = (long)tb & 3;
1342
                tb = (TranslationBlock *)((long)tb & ~3);
1343
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
1344
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
1345
                    tb_invalidate(tb);
1346
                }
1347
                tb = tb->page_next[n];
1348
            }
1349
        }
1350
        vp->valid_tag = 0;
1351
    }
1352

    
1353
#if !defined(CONFIG_SOFTMMU)
1354
    if (addr < MMAP_AREA_END)
1355
        munmap((void *)addr, TARGET_PAGE_SIZE);
1356
#endif
1357
}
1358

    
1359
static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
1360
{
1361
    if (addr == (tlb_entry->address & 
1362
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
1363
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
1364
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
1365
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
1366
    }
1367
}
1368

    
1369
/* update the TLBs so that writes to code in the virtual page 'addr'
1370
   can be detected */
1371
static void tlb_protect_code(CPUState *env, target_ulong addr)
1372
{
1373
    int i;
1374

    
1375
    addr &= TARGET_PAGE_MASK;
1376
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1377
    tlb_protect_code1(&env->tlb_write[0][i], addr);
1378
    tlb_protect_code1(&env->tlb_write[1][i], addr);
1379
#if !defined(CONFIG_SOFTMMU)
1380
    /* NOTE: as we generated the code for this page, it is already at
1381
       least readable */
1382
    if (addr < MMAP_AREA_END)
1383
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
1384
#endif
1385
}
1386

    
1387
static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry, 
1388
                                       unsigned long phys_addr)
1389
{
1390
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
1391
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
1392
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1393
    }
1394
}
1395

    
1396
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1397
   tested for self-modifying code */
1398
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
1399
{
1400
    int i;
1401

    
1402
    phys_addr &= TARGET_PAGE_MASK;
1403
    phys_addr += (long)phys_ram_base;
1404
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1405
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
1406
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
1407
}
1408

    
1409
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, 
1410
                                         unsigned long start, unsigned long length)
1411
{
1412
    unsigned long addr;
1413
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1414
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1415
        if ((addr - start) < length) {
1416
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1417
        }
1418
    }
1419
}
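/* phys_ram_dirty holds one byte per target page.  Clearing a range here and
   downgrading the matching TLB write entries to IO_MEM_NOTDIRTY forces the
   next guest write to take the slow path, which marks the page dirty again
   via tlb_set_dirty(). */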
1420

    
1421
void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
1422
{
1423
    CPUState *env;
1424
    unsigned long length, start1;
1425
    int i;
1426

    
1427
    start &= TARGET_PAGE_MASK;
1428
    end = TARGET_PAGE_ALIGN(end);
1429

    
1430
    length = end - start;
1431
    if (length == 0)
1432
        return;
1433
    memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);
1434

    
1435
    env = cpu_single_env;
1436
    /* we modify the TLB cache so that the dirty bit will be set again
1437
       when accessing the range */
1438
    start1 = start + (unsigned long)phys_ram_base;
1439
    for(i = 0; i < CPU_TLB_SIZE; i++)
1440
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
1441
    for(i = 0; i < CPU_TLB_SIZE; i++)
1442
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
1443

    
1444
#if !defined(CONFIG_SOFTMMU)
1445
    /* XXX: this is expensive */
1446
    {
1447
        VirtPageDesc *p;
1448
        int j;
1449
        target_ulong addr;
1450

    
1451
        for(i = 0; i < L1_SIZE; i++) {
1452
            p = l1_virt_map[i];
1453
            if (p) {
1454
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
1455
                for(j = 0; j < L2_SIZE; j++) {
1456
                    if (p->valid_tag == virt_valid_tag &&
1457
                        p->phys_addr >= start && p->phys_addr < end &&
1458
                        (p->prot & PROT_WRITE)) {
1459
                        if (addr < MMAP_AREA_END) {
1460
                            mprotect((void *)addr, TARGET_PAGE_SIZE, 
1461
                                     p->prot & ~PROT_WRITE);
1462
                        }
1463
                    }
1464
                    addr += TARGET_PAGE_SIZE;
1465
                    p++;
1466
                }
1467
            }
1468
        }
1469
    }
1470
#endif
1471
}
1472

    
1473
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, 
1474
                                    unsigned long start)
1475
{
1476
    unsigned long addr;
1477
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1478
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1479
        if (addr == start) {
1480
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
1481
        }
1482
    }
1483
}
1484

    
1485
/* update the TLB corresponding to virtual page vaddr and phys addr
1486
   addr so that it is no longer dirty */
1487
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
1488
{
1489
    CPUState *env = cpu_single_env;
1490
    int i;
1491

    
1492
    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;
1493

    
1494
    addr &= TARGET_PAGE_MASK;
1495
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1496
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
1497
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
1498
}
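/* In tlb_set_page() below, the low bits of a TLB entry's 'address' field
   encode how writes are handled: IO_MEM_CODE routes them through the slow
   path so self-modifying code is caught, IO_MEM_NOTDIRTY does the same so
   the dirty bitmap can be updated, IO_MEM_ROM causes them to be ignored,
   and a plain RAM entry is written directly through 'addend'. */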
1499

    
1500
/* add a new TLB entry. At most one entry for a given virtual address
1501
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1502
   (can only happen in non SOFTMMU mode for I/O pages or pages
1503
   conflicting with the host address space). */
1504
int tlb_set_page(CPUState *env, target_ulong vaddr, 
1505
                 target_phys_addr_t paddr, int prot, 
1506
                 int is_user, int is_softmmu)
1507
{
1508
    PhysPageDesc *p;
1509
    unsigned long pd;
1510
    TranslationBlock *first_tb;
1511
    unsigned int index;
1512
    target_ulong address;
1513
    unsigned long addend;
1514
    int ret;
1515

    
1516
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1517
    first_tb = NULL;
1518
    if (!p) {
1519
        pd = IO_MEM_UNASSIGNED;
1520
    } else {
1521
        PageDesc *p1;
1522
        pd = p->phys_offset;
1523
        if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1524
            /* NOTE: we also allocate the page at this stage */
1525
            p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
1526
            first_tb = p1->first_tb;
1527
        }
1528
    }
1529
#if defined(DEBUG_TLB)
1530
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
1531
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
1532
#endif
1533

    
1534
    ret = 0;
1535
#if !defined(CONFIG_SOFTMMU)
1536
    if (is_softmmu) 
1537
#endif
1538
    {
1539
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1540
            /* IO memory case */
1541
            address = vaddr | pd;
1542
            addend = paddr;
1543
        } else {
1544
            /* standard memory */
1545
            address = vaddr;
1546
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1547
        }
1548
        
1549
        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
1550
        addend -= vaddr;
1551
        if (prot & PAGE_READ) {
1552
            env->tlb_read[is_user][index].address = address;
1553
            env->tlb_read[is_user][index].addend = addend;
1554
        } else {
1555
            env->tlb_read[is_user][index].address = -1;
1556
            env->tlb_read[is_user][index].addend = -1;
1557
        }
1558
        if (prot & PAGE_WRITE) {
1559
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
1560
                /* ROM: access is ignored (same as unassigned) */
1561
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
1562
                env->tlb_write[is_user][index].addend = addend;
1563
            } else 
1564
                /* XXX: the PowerPC code seems not ready to handle
1565
                   self modifying code with DCBI */
1566
#if defined(TARGET_HAS_SMC) || 1
1567
            if (first_tb) {
1568
                /* if code is present, we use a specific memory
1569
                   handler. It works only for physical memory access */
1570
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
1571
                env->tlb_write[is_user][index].addend = addend;
1572
            } else 
1573
#endif
1574
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && 
1575
                       !cpu_physical_memory_is_dirty(pd)) {
1576
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
1577
                env->tlb_write[is_user][index].addend = addend;
1578
            } else {
1579
                env->tlb_write[is_user][index].address = address;
1580
                env->tlb_write[is_user][index].addend = addend;
1581
            }
1582
        } else {
1583
            env->tlb_write[is_user][index].address = -1;
1584
            env->tlb_write[is_user][index].addend = -1;
1585
        }
1586
    }
1587
#if !defined(CONFIG_SOFTMMU)
1588
    else {
1589
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1590
            /* IO access: no mapping is done as it will be handled by the
1591
               soft MMU */
1592
            if (!(env->hflags & HF_SOFTMMU_MASK))
1593
                ret = 2;
1594
        } else {
1595
            void *map_addr;
1596

    
1597
            if (vaddr >= MMAP_AREA_END) {
1598
                ret = 2;
1599
            } else {
1600
                if (prot & PROT_WRITE) {
1601
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || 
1602
#if defined(TARGET_HAS_SMC) || 1
1603
                        first_tb ||
1604
#endif
1605
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && 
1606
                         !cpu_physical_memory_is_dirty(pd))) {
1607
                        /* ROM: we behave as if code were inside */
1608
                        /* if code is present, we only map as read only and save the
1609
                           original mapping */
1610
                        VirtPageDesc *vp;
1611
                        
1612
                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
1613
                        vp->phys_addr = pd;
1614
                        vp->prot = prot;
1615
                        vp->valid_tag = virt_valid_tag;
1616
                        prot &= ~PAGE_WRITE;
1617
                    }
1618
                }
1619
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot, 
1620
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1621
                if (map_addr == MAP_FAILED) {
1622
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1623
                              paddr, vaddr);
1624
                }
1625
            }
1626
        }
1627
    }
1628
#endif
1629
    return ret;
1630
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, valid_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
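
/* Illustrative note (not part of the original code): page_dump() prints one
   line per contiguous region with identical protection.  The addresses below
   are hypothetical and only show the output format:

       start    end      size     prot
       08048000-0804c000 00004000 r-x
       0804c000-0804e000 00002000 rw-
*/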

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the page is being made writable and contains translated
           code, the code must be invalidated */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
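
/* Illustrative sketch (not part of the original code): a user-mode target
   mmap/mprotect emulation would typically record the new guest protection
   with page_set_flags() once the host mapping is in place.  The address,
   length and protection values below are hypothetical. */
#if 0
static void example_set_guest_prot(void)
{
    unsigned long guest_addr = 0x40000000;
    unsigned long guest_len = 2 * TARGET_PAGE_SIZE;

    /* mark the range as valid, readable and writable; PAGE_WRITE_ORG is
       added automatically because PAGE_WRITE is requested */
    page_set_flags(guest_addr, guest_addr + guest_len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
}
#endif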

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
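
/* Illustrative sketch (not part of the original code): the host SIGSEGV
   handler is expected to try page_unprotect() on the faulting address before
   treating the fault as a real guest fault.  The handler name and the way the
   fault address and PC are obtained are hypothetical here. */
#if 0
static int example_handle_host_write_fault(unsigned long fault_addr,
                                           unsigned long host_pc,
                                           void *sigcontext)
{
    /* returns 1 if the fault was caused by our own write protection of a
       page containing translated code; in that case the page has been made
       writable again and the faulting instruction can simply be restarted */
    if (page_unprotect(fault_addr, host_pc, sigcontext))
        return 1;
    /* otherwise the fault must be forwarded to the guest */
    return 0;
}
#endif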

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}
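
/* Illustrative sketch (not part of the original code): a syscall emulation
   that lets the host kernel write directly into guest memory (for example
   read(2)) would unprotect the destination range first so that translated
   code covering those pages is invalidated.  The helper below is
   hypothetical. */
#if 0
static void example_do_guest_read(int host_fd, uint8_t *guest_buf, long count)
{
    /* make the destination pages writable again and flush any translated
       code inside them before the host kernel stores into guest_buf */
    page_unprotect_range(guest_buf, count);
    read(host_fd, guest_buf, count);
}
#endif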

static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    unsigned long addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
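
/* Illustrative sketch (not part of the original code): a machine
   initialization would typically map guest RAM at physical address 0 and
   mark a firmware region as ROM.  The sizes, physical addresses and offsets
   into phys_ram_base used below are hypothetical. */
#if 0
static void example_register_memory(void)
{
    /* first 8 MB of the physical address space backed by host RAM,
       starting at offset 0 of phys_ram_base */
    cpu_register_physical_memory(0x00000000, 8 * 1024 * 1024, 0 | IO_MEM_RAM);

    /* 64 KB of ROM at the top of the address space, backed by the RAM
       area just after the 8 MB of guest RAM; writes to it are ignored */
    cpu_register_physical_memory(0xfff00000, 64 * 1024,
                                 (8 * 1024 * 1024) | IO_MEM_ROM);
}
#endif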

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

/* self modifying code support in soft mmu mode : writing to a page
   containing code comes to these functions */

static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 1);
#endif
    stb_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2);
#endif
    stw_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4);
#endif
    stl_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stb_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stw_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stl_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
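
/* Illustrative sketch (not part of the original code): a device model
   registers its access callbacks with cpu_register_io_memory() and then maps
   the returned handle into the physical address space.  The device, its
   single register and the addresses below are hypothetical. */
#if 0
typedef struct ExampleDevState {
    uint32_t status;
} ExampleDevState;

static uint32_t example_dev_readl(void *opaque, target_phys_addr_t addr)
{
    ExampleDevState *s = opaque;
    /* only one 32 bit status register at offset 0 */
    return s->status;
}

static void example_dev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    ExampleDevState *s = opaque;
    s->status = val;
}

static CPUReadMemoryFunc *example_dev_read[3] = {
    example_dev_readl, /* byte */
    example_dev_readl, /* word */
    example_dev_readl, /* dword */
};

static CPUWriteMemoryFunc *example_dev_write[3] = {
    example_dev_writel,
    example_dev_writel,
    example_dev_writel,
};

static void example_dev_init(ExampleDevState *s)
{
    int iomemtype;

    /* io_index == 0 asks for a newly allocated io zone */
    iomemtype = cpu_register_io_memory(0, example_dev_read,
                                       example_dev_write, s);
    /* map one page of the physical address space to the device */
    cpu_register_physical_memory(0xfe000000, TARGET_PAGE_SIZE, iomemtype);
}
#endif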

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                /* set dirty bit */
                phys_ram_dirty[page >> TARGET_PAGE_BITS] = 1;
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#endif
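
/* Illustrative sketch (not part of the original code): device emulation code
   uses cpu_physical_memory_rw() for DMA-style transfers into or out of guest
   memory; writes through this path also invalidate any translated code in
   the affected pages.  The helper and addresses are hypothetical. */
#if 0
static void example_dma_to_guest(target_phys_addr_t dest, const uint8_t *src,
                                 int size)
{
    /* is_write != 0: copy from the host buffer into guest physical memory */
    cpu_physical_memory_rw(dest, (uint8_t *)src, size, 1);
}
#endif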

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
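
/* Illustrative sketch (not part of the original code): a debugger stub reads
   guest virtual memory through cpu_memory_rw_debug(), which performs the
   virtual-to-physical translation page by page.  The helper below is
   hypothetical. */
#if 0
static int example_read_guest_virtual(CPUState *env, target_ulong vaddr,
                                      uint8_t *buf, int size)
{
    /* returns 0 on success, -1 if some page in the range is not mapped */
    return cpu_memory_rw_debug(env, vaddr, buf, size, 0);
}
#endif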

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif