/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/mman.h>

#include "config.h"
#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10
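
/* NOTE: once a page has seen SMC_BITMAP_USE_THRESHOLD write faults while it
   still contains translated code, tb_invalidate_phys_page_range() builds a
   per page code bitmap (see build_page_bitmap()) so that small writes which
   do not hit translated code can avoid a full invalidation. */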

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

typedef struct PageDesc {
    /* offset in memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
    /* list of TBs intersecting this physical page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of write accesses to a given page and switch to a bitmap past a
       threshold */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
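
/* NOTE: the TB lists hanging off 'first_tb' use tagged pointers: a TB may
   span two physical pages, so the low 2 bits of each link encode which of
   the TB's pages (0 or 1) the link belongs to; see tb_alloc_page() and
   tb_page_remove(). */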

typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
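
/* The page descriptors form a two level table indexed by the target page
   number: the upper L1_BITS select an l1_map[] entry, which points to a
   lazily allocated array of L2_SIZE descriptors indexed by the lower
   L2_BITS (see page_find_alloc()). For a target with 4 KB pages
   (TARGET_PAGE_BITS == 12) this gives L1_BITS == 10, i.e. 1024 entries at
   each level, covering the whole 32 bit address space. */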

static void io_mem_init(void);

unsigned long real_host_page_size;
unsigned long host_page_bits;
unsigned long host_page_size;
unsigned long host_page_mask;

static PageDesc *l1_map[L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
static VirtPageDesc *l1_virt_map[L1_SIZE];
static unsigned int virt_valid_tag;
#endif

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

static void page_init(void)
{
    /* NOTE: we can always suppose that host_page_size >=
       TARGET_PAGE_SIZE */
    real_host_page_size = getpagesize();
    if (host_page_size == 0)
        host_page_size = real_host_page_size;
    if (host_page_size < TARGET_PAGE_SIZE)
        host_page_size = TARGET_PAGE_SIZE;
    host_page_bits = 0;
    while ((1 << host_page_bits) < host_page_size)
        host_page_bits++;
    host_page_mask = ~(host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, uint32_t addr);
static void tlb_unprotect_code(CPUState *env, uint32_t addr);
static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr, target_ulong vaddr);

static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
{
    VirtPageDesc **lp, *p;

    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline VirtPageDesc *virt_page_find(unsigned int index)
{
    VirtPageDesc *p;

    p = l1_virt_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static void virt_page_flush(void)
{
    int i, j;
    VirtPageDesc *p;

    virt_valid_tag++;

    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                for(j = 0; j < L2_SIZE; j++)
                    p[j].valid_tag = 0;
            }
        }
    }
}
#else
static void virt_page_flush(void)
{
}
#endif

void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
    int i;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;
    virt_page_flush();

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++)
        tb_phys_hash[i] = NULL;
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
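
/* NOTE: each TB also keeps a circular list of the TBs that jump to it.
   'jmp_first' and the 'jmp_next[]' links are tagged pointers: the low 2
   bits hold the jump slot (0 or 1) inside the referencing TB, and the
   value 2 marks the list head (the TB itself), which is how the loops
   above and in tb_invalidate() detect the end of the list. */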

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
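
/* Example: set_bits(tab, 5, 7) marks bits 5..11, i.e. it ORs 0xe0 into
   tab[0] (bits 5-7) and 0x0f into tab[1] (bits 8-11). build_page_bitmap()
   below uses this with one bit per byte of the target page. */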

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'vaddr' is a virtual address referencing
   the physical page of code. It is only used as a hint if there is no
   code left. */
static void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                          target_ulong vaddr)
{
    int n;
    PageDesc *p;
    TranslationBlock *tb, *tb_next;
    target_ulong tb_start, tb_end;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code_phys(cpu_single_env, start, vaddr);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len, target_ulong vaddr)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (cpu_single_env->cr[0] & CR0_PE_MASK) {
        printf("modifying code at 0x%x size=%d EIP=%x\n",
               (vaddr & TARGET_PAGE_MASK) | (start & ~TARGET_PAGE_MASK), len,
               cpu_single_env->eip);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, vaddr);
    }
}
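
/* In softmmu mode tb_invalidate_phys_page_fast() is reached from the
   code_mem_write* handlers further down in this file, which pass the size
   of the memory access (1, 2 or 4 bytes) as 'len'; thanks to the code
   bitmap a write that does not overlap translated code skips the full
   invalidation. */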

/* invalidate all TBs which intersect with the target virtual page
   starting in range [start;end[. This function is usually used when
   the target processor flushes its I-cache. NOTE: start and end must
   refer to the same physical page */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    int n;
    PageDesc *p;
    TranslationBlock *tb, *tb_next;
    target_ulong pc;
    target_ulong phys_start;

#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        vp = virt_page_find(start >> TARGET_PAGE_BITS);
        if (!vp)
            return;
        if (vp->valid_tag != virt_valid_tag)
            return;
        phys_start = vp->phys_addr + (start & ~TARGET_PAGE_MASK);
    }
#else
    phys_start = start;
#endif
    p = page_find(phys_start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        pc = tb->pc;
        if (!((pc + tb->size) <= start || pc >= end)) {
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb)
        tlb_unprotect_code(cpu_single_env, start);
#endif
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr)
{
    int n;
    PageDesc *p;
    TranslationBlock *tb;

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & host_page_mask;
        host_end = host_start + host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
    }
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
        if (vp->valid_tag == virt_valid_tag &&
            vp->phys_addr != tb->page_addr[0]) {
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                   addr, tb->page_addr[0], vp->phys_addr);
        }
#endif
        vp->phys_addr = tb->page_addr[0];
        if (vp->valid_tag != virt_valid_tag) {
            vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
            vp->prot = 0;
#endif
        }

        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
            if (vp->valid_tag == virt_valid_tag &&
                vp->phys_addr != tb->page_addr[1]) {
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                       addr, tb->page_addr[1], vp->phys_addr);
            }
#endif
            vp->phys_addr = tb->page_addr[1];
            if (vp->valid_tag != virt_valid_tag) {
                vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
                vp->prot = 0;
#endif
            }
        }
    }
#endif

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
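
/* NOTE: tb_alloc() hands out tbs[] entries in generation order and their
   tc_ptr values come from the monotonically growing code_gen_ptr, so the
   array is already sorted by tc_ptr; the binary search above can therefore
   map a host PC inside the generated code buffer back to the TB that
   contains it. */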

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;
    tb_invalidate_page_range(pc, pc + 1);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;
    tb_invalidate_page_range(pc, pc + 1);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}


void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    virt_page_flush();
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}
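
/* The software TLB is a small direct mapped cache of CPU_TLB_SIZE entries
   per table, indexed by the low bits of the virtual page number, with
   separate read and write tables and separate copies indexed by the
   'is_user' flag (see tlb_set_page() below). Setting 'address' to -1 is
   enough to invalidate an entry since it can never match a page aligned
   virtual address. */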

void tlb_flush_page(CPUState *env, uint32_t addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TB at this
       virtual address */

    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
}

static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, uint32_t addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}

static inline void tlb_unprotect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}

/* update the TLB so that writes in virtual page 'addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code(CPUState *env, uint32_t addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code1(&env->tlb_write[0][i], addr);
    tlb_unprotect_code1(&env->tlb_write[1][i], addr);
}

static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       uint32_t phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr, target_ulong vaddr)
{
    int i;

    phys_addr &= TARGET_PAGE_MASK;
    phys_addr += (long)phys_ram_base;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
{
    CPUState *env;
    target_ulong length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);

    env = cpu_single_env;
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                    unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that the page is marked dirty and writes are no longer
   trapped */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}
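
/* Dirty RAM tracking: pages whose dirty bit is clear are entered in the
   write TLB as IO_MEM_NOTDIRTY by tlb_set_page() below, so the first write
   goes through the notdirty_mem_write* handlers, which call
   tlb_set_dirty(); this sets the bit in phys_ram_dirty[] and switches the
   entry back to IO_MEM_RAM so that later writes to the page are done
   directly. */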

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PageDesc *p;
    target_ulong pd;
    TranslationBlock *first_tb;
    unsigned int index;
    target_ulong address, addend;
    int ret;

    p = page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
        first_tb = NULL;
    } else {
        pd = p->phys_offset;
        first_tb = p->first_tb;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PROT_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PROT_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend;
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                        first_tb ||
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
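
/* Summary of the softmmu entry format set up above: for RAM pages the TLB
   'addend' is chosen so that host address = guest virtual address + addend
   (it is phys_ram_base + physical page offset - vaddr), while for I/O
   pages the io_index is kept in the low bits of 'address' so that the
   access can be routed through the io_mem_read/io_mem_write tables. */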

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr);
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, uint32_t addr)
{
}

void tlb_flush_page_write(CPUState *env, uint32_t addr)
{
}

int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr);
    }
}

static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}

#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(unsigned long start_addr, unsigned long size,
                                  long phys_offset)
{
    unsigned long addr, end_addr;
    PageDesc *p;

    end_addr = start_addr + size;
    for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
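
/* NOTE: phys_offset is only advanced for RAM and ROM pages
   ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM), so each such page
   records its own offset into the phys_ram_base area, whereas every page
   of an I/O region keeps the same io_index in its low bits. */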
1602

    
1603
static uint32_t unassigned_mem_readb(uint32_t addr)
1604
{
1605
    return 0;
1606
}
1607

    
1608
static void unassigned_mem_writeb(uint32_t addr, uint32_t val, uint32_t vaddr)
1609
{
1610
}
1611

    
1612
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1613
    unassigned_mem_readb,
1614
    unassigned_mem_readb,
1615
    unassigned_mem_readb,
1616
};
1617

    
1618
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1619
    unassigned_mem_writeb,
1620
    unassigned_mem_writeb,
1621
    unassigned_mem_writeb,
1622
};
1623

    
1624
/* self modifying code support in soft mmu mode : writing to a page
1625
   containing code comes to these functions */
1626

    
1627
static void code_mem_writeb(uint32_t addr, uint32_t val, uint32_t vaddr)
{
    unsigned long phys_addr;

    phys_addr = addr - (long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 1, vaddr);
#endif
    stb_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writew(uint32_t addr, uint32_t val, uint32_t vaddr)
{
    unsigned long phys_addr;

    phys_addr = addr - (long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2, vaddr);
#endif
    stw_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writel(uint32_t addr, uint32_t val, uint32_t vaddr)
{
    unsigned long phys_addr;

    phys_addr = addr - (long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4, vaddr);
#endif
    stl_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};

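/* Illustrative sketch, not part of the original file: the handlers
   above mark a page dirty by setting one byte per target page in
   phys_ram_dirty.  A consumer such as display refresh code could test
   and clear that byte as below; the helper name is made up. */
#if 0
static int example_test_and_clear_dirty(unsigned long ram_offset)
{
    unsigned long idx = ram_offset >> TARGET_PAGE_BITS;
    int dirty;

    dirty = phys_ram_dirty[idx];
    phys_ram_dirty[idx] = 0;
    return dirty;
}
#endif
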
static void notdirty_mem_writeb(uint32_t addr, uint32_t val, uint32_t vaddr)
{
    stb_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, vaddr);
}

static void notdirty_mem_writew(uint32_t addr, uint32_t val, uint32_t vaddr)
{
    stw_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, vaddr);
}

static void notdirty_mem_writel(uint32_t addr, uint32_t val, uint32_t vaddr)
{
    stl_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, vaddr);
}

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    return io_index << IO_MEM_SHIFT;
}

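/* Illustrative sketch, not part of the original file: a device supplies
   one read and one write handler per access size (byte, word, dword),
   registers them to obtain an io-index-encoded phys_offset, and maps
   that value over a physical address range.  The device name, base
   address and empty handlers below are made up. */
#if 0
static uint32_t mydev_readb(uint32_t addr) { return 0; }
static uint32_t mydev_readw(uint32_t addr) { return 0; }
static uint32_t mydev_readl(uint32_t addr) { return 0; }
static void mydev_writeb(uint32_t addr, uint32_t val, uint32_t vaddr) { }
static void mydev_writew(uint32_t addr, uint32_t val, uint32_t vaddr) { }
static void mydev_writel(uint32_t addr, uint32_t val, uint32_t vaddr) { }

static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb, mydev_readw, mydev_readl,
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb, mydev_writew, mydev_writel,
};

static void example_register_mydev(void)
{
    int iomemtype;

    /* io_index 0 asks for a fresh io zone; the return value is a
       phys_offset with the io_index shifted into the low bits */
    iomemtype = cpu_register_io_memory(0, mydev_read, mydev_write);
    /* map one page of MMIO registers at an arbitrary guest address */
    cpu_register_physical_memory(0xd0000000, TARGET_PAGE_SIZE, iomemtype);
}
#endif
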
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(CPUState *env, uint8_t *buf, target_ulong addr,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(CPUState *env, uint8_t *buf, target_ulong addr,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_ulong page, pd;
    PageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_raw(buf);
                    io_mem_write[io_index][2](addr, val, 0);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_raw(buf);
                    io_mem_write[io_index][1](addr, val, 0);
                    l = 2;
                } else {
                    /* 8 bit access */
                    val = ldub_raw(buf);
                    io_mem_write[io_index][0](addr, val, 0);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(ptr, buf, l);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](addr);
                    stl_raw(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](addr);
                    stw_raw(buf, val);
                    l = 2;
                } else {
                    /* 8 bit access */
                    val = io_mem_read[io_index][0](addr);
                    stb_raw(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#endif

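/* Illustrative sketch, not part of the original file: a debug-style
   helper that stores a 32-bit value at a guest physical address through
   the slow-path accessor above.  stl_raw() fills the buffer the same
   way the write path's ldl_raw() expects to read it back. */
#if 0
static void example_stl_phys(CPUState *env, target_ulong addr, uint32_t val)
{
    uint8_t buf[4];

    stl_raw(buf, val);
    cpu_physical_memory_rw(env, buf, addr, 4, 1);
}
#endif
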
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env,
                        uint8_t *buf, target_ulong addr, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(env, buf,
                               phys_addr + (addr & ~TARGET_PAGE_MASK), l,
                               is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif