1
/*
2
 *  virtual page mapping and translated block handling
3
 * 
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include "config.h"
21
#include <stdlib.h>
22
#include <stdio.h>
23
#include <stdarg.h>
24
#include <string.h>
25
#include <errno.h>
26
#include <unistd.h>
27
#include <inttypes.h>
28
#if !defined(CONFIG_SOFTMMU)
29
#include <sys/mman.h>
30
#endif
31

    
32
#include "cpu.h"
33
#include "exec-all.h"
34

    
35
//#define DEBUG_TB_INVALIDATE
36
//#define DEBUG_FLUSH
37
//#define DEBUG_TLB
38

    
39
/* make various TB consistency checks */
40
//#define DEBUG_TB_CHECK 
41
//#define DEBUG_TLB_CHECK 
42

    
43
/* threshold to flush the translated code buffer */
44
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
45

    
46
#define SMC_BITMAP_USE_THRESHOLD 10
47

    
48
#define MMAP_AREA_START        0x00000000
49
#define MMAP_AREA_END          0xa8000000
50

    
51
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
52
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
53
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
54
int nb_tbs;
55
/* any access to the tbs or the page table must use this lock */
56
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
57

    
58
uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
59
uint8_t *code_gen_ptr;
60

    
61
int phys_ram_size;
62
int phys_ram_fd;
63
uint8_t *phys_ram_base;
64
uint8_t *phys_ram_dirty;
65

    
66
typedef struct PageDesc {
67
    /* list of TBs intersecting this ram page */
68
    TranslationBlock *first_tb;
69
    /* in order to optimize self modifying code handling, we count the
       number of code write invalidations done to a given page before
       switching to a bitmap of the code bytes */
71
    unsigned int code_write_count;
72
    uint8_t *code_bitmap;
73
#if defined(CONFIG_USER_ONLY)
74
    unsigned long flags;
75
#endif
76
} PageDesc;
77

    
78
typedef struct PhysPageDesc {
79
    /* offset in host memory of the page + io_index in the low 12 bits */
80
    unsigned long phys_offset;
81
} PhysPageDesc;
82

    
83
typedef struct VirtPageDesc {
84
    /* physical address of code page. It is valid only if 'valid_tag'
85
       matches 'virt_valid_tag' */ 
86
    target_ulong phys_addr; 
87
    unsigned int valid_tag;
88
#if !defined(CONFIG_SOFTMMU)
89
    /* original page access rights. It is valid only if 'valid_tag'
90
       matches 'virt_valid_tag' */
91
    unsigned int prot;
92
#endif
93
} VirtPageDesc;
94

    
95
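/* two-level page table: the high L1_BITS of a target page index select
   an entry of l1_map / l1_phys_map / l1_virt_map, the low L2_BITS select
   the descriptor inside the second level array (e.g. with 4 KB target
   pages the 20 bit page index is split 10/10) */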
#define L2_BITS 10
96
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
97

    
98
#define L1_SIZE (1 << L1_BITS)
99
#define L2_SIZE (1 << L2_BITS)
100

    
101
static void io_mem_init(void);
102

    
103
unsigned long qemu_real_host_page_size;
104
unsigned long qemu_host_page_bits;
105
unsigned long qemu_host_page_size;
106
unsigned long qemu_host_page_mask;
107

    
108
/* XXX: for system emulation, it could just be an array */
109
static PageDesc *l1_map[L1_SIZE];
110
static PhysPageDesc *l1_phys_map[L1_SIZE];
111

    
112
#if !defined(CONFIG_USER_ONLY)
113
static VirtPageDesc *l1_virt_map[L1_SIZE];
114
static unsigned int virt_valid_tag;
115
#endif
116

    
117
/* io memory support */
118
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
119
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
120
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
121
static int io_mem_nb;
122

    
123
/* log support */
124
char *logfilename = "/tmp/qemu.log";
125
FILE *logfile;
126
int loglevel;
127

    
128
static void page_init(void)
129
{
130
    /* NOTE: we can always suppose that qemu_host_page_size >=
131
       TARGET_PAGE_SIZE */
132
#ifdef _WIN32
133
    qemu_real_host_page_size = 4096;
134
#else
135
    qemu_real_host_page_size = getpagesize();
136
#endif
137
    if (qemu_host_page_size == 0)
138
        qemu_host_page_size = qemu_real_host_page_size;
139
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
140
        qemu_host_page_size = TARGET_PAGE_SIZE;
141
    qemu_host_page_bits = 0;
142
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
143
        qemu_host_page_bits++;
144
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
145
#if !defined(CONFIG_USER_ONLY)
146
    virt_valid_tag = 1;
147
#endif
148
}
149

    
150
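/* return the PageDesc of the given target page index, allocating the
   second level table if needed */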
static inline PageDesc *page_find_alloc(unsigned int index)
151
{
152
    PageDesc **lp, *p;
153

    
154
    lp = &l1_map[index >> L2_BITS];
155
    p = *lp;
156
    if (!p) {
157
        /* allocate if not found */
158
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
159
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
160
        *lp = p;
161
    }
162
    return p + (index & (L2_SIZE - 1));
163
}
164

    
165
static inline PageDesc *page_find(unsigned int index)
166
{
167
    PageDesc *p;
168

    
169
    p = l1_map[index >> L2_BITS];
170
    if (!p)
171
        return 0;
172
    return p + (index & (L2_SIZE - 1));
173
}
174

    
175
static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
176
{
177
    PhysPageDesc **lp, *p;
178

    
179
    lp = &l1_phys_map[index >> L2_BITS];
180
    p = *lp;
181
    if (!p) {
182
        /* allocate if not found */
183
        p = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
184
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
185
        *lp = p;
186
    }
187
    return p + (index & (L2_SIZE - 1));
188
}
189

    
190
static inline PhysPageDesc *phys_page_find(unsigned int index)
191
{
192
    PhysPageDesc *p;
193

    
194
    p = l1_phys_map[index >> L2_BITS];
195
    if (!p)
196
        return 0;
197
    return p + (index & (L2_SIZE - 1));
198
}
199

    
200
#if !defined(CONFIG_USER_ONLY)
201
static void tlb_protect_code(CPUState *env, target_ulong addr);
202
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);
203

    
204
static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
205
{
206
    VirtPageDesc **lp, *p;
207

    
208
    lp = &l1_virt_map[index >> L2_BITS];
209
    p = *lp;
210
    if (!p) {
211
        /* allocate if not found */
212
        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
213
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
214
        *lp = p;
215
    }
216
    return p + (index & (L2_SIZE - 1));
217
}
218

    
219
static inline VirtPageDesc *virt_page_find(unsigned int index)
220
{
221
    VirtPageDesc *p;
222

    
223
    p = l1_virt_map[index >> L2_BITS];
224
    if (!p)
225
        return 0;
226
    return p + (index & (L2_SIZE - 1));
227
}
228

    
229
static void virt_page_flush(void)
230
{
231
    int i, j;
232
    VirtPageDesc *p;
233
    
234
    virt_valid_tag++;
235

    
236
    if (virt_valid_tag == 0) {
237
        virt_valid_tag = 1;
238
        for(i = 0; i < L1_SIZE; i++) {
239
            p = l1_virt_map[i];
240
            if (p) {
241
                for(j = 0; j < L2_SIZE; j++)
242
                    p[j].valid_tag = 0;
243
            }
244
        }
245
    }
246
}
247
#else
248
static void virt_page_flush(void)
249
{
250
}
251
#endif
252

    
253
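/* one time initialization of the code generation buffer, the page
   tables and the io memory handlers */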
void cpu_exec_init(void)
254
{
255
    if (!code_gen_ptr) {
256
        code_gen_ptr = code_gen_buffer;
257
        page_init();
258
        io_mem_init();
259
    }
260
}
261

    
262
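/* free the SMC code bitmap of the page and reset its write counter */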
static inline void invalidate_page_bitmap(PageDesc *p)
263
{
264
    if (p->code_bitmap) {
265
        qemu_free(p->code_bitmap);
266
        p->code_bitmap = NULL;
267
    }
268
    p->code_write_count = 0;
269
}
270

    
271
/* set to NULL all the 'first_tb' fields in all PageDescs */
272
static void page_flush_tb(void)
273
{
274
    int i, j;
275
    PageDesc *p;
276

    
277
    for(i = 0; i < L1_SIZE; i++) {
278
        p = l1_map[i];
279
        if (p) {
280
            for(j = 0; j < L2_SIZE; j++) {
281
                p->first_tb = NULL;
282
                invalidate_page_bitmap(p);
283
                p++;
284
            }
285
        }
286
    }
287
}
288

    
289
/* flush all the translation blocks */
290
/* XXX: tb_flush is currently not thread safe */
291
void tb_flush(CPUState *env)
292
{
293
    int i;
294
#if defined(DEBUG_FLUSH)
295
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n", 
296
           code_gen_ptr - code_gen_buffer, 
297
           nb_tbs, 
298
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
299
#endif
300
    nb_tbs = 0;
301
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
302
        tb_hash[i] = NULL;
303
    virt_page_flush();
304

    
305
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++)
306
        tb_phys_hash[i] = NULL;
307
    page_flush_tb();
308

    
309
    code_gen_ptr = code_gen_buffer;
310
    /* XXX: flush processor icache at this point if cache flush is
311
       expensive */
312
}
313

    
314
#ifdef DEBUG_TB_CHECK
315

    
316
static void tb_invalidate_check(unsigned long address)
317
{
318
    TranslationBlock *tb;
319
    int i;
320
    address &= TARGET_PAGE_MASK;
321
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
322
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
323
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
324
                  address >= tb->pc + tb->size)) {
325
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
326
                       address, tb->pc, tb->size);
327
            }
328
        }
329
    }
330
}
331

    
332
/* verify that all the pages have correct rights for code */
333
static void tb_page_check(void)
334
{
335
    TranslationBlock *tb;
336
    int i, flags1, flags2;
337
    
338
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
339
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
340
            flags1 = page_get_flags(tb->pc);
341
            flags2 = page_get_flags(tb->pc + tb->size - 1);
342
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
343
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
344
                       tb->pc, tb->size, flags1, flags2);
345
            }
346
        }
347
    }
348
}
349

    
350
void tb_jmp_check(TranslationBlock *tb)
351
{
352
    TranslationBlock *tb1;
353
    unsigned int n1;
354

    
355
    /* suppress any remaining jumps to this TB */
356
    tb1 = tb->jmp_first;
357
    for(;;) {
358
        n1 = (long)tb1 & 3;
359
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
360
        if (n1 == 2)
361
            break;
362
        tb1 = tb1->jmp_next[n1];
363
    }
364
    /* check end of list */
365
    if (tb1 != tb) {
366
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
367
    }
368
}
369

    
370
#endif
371

    
372
/* invalidate one TB */
373
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
374
                             int next_offset)
375
{
376
    TranslationBlock *tb1;
377
    for(;;) {
378
        tb1 = *ptb;
379
        if (tb1 == tb) {
380
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
381
            break;
382
        }
383
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
384
    }
385
}
386

    
387
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
388
{
389
    TranslationBlock *tb1;
390
    unsigned int n1;
391

    
392
    for(;;) {
393
        tb1 = *ptb;
394
        n1 = (long)tb1 & 3;
395
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
396
        if (tb1 == tb) {
397
            *ptb = tb1->page_next[n1];
398
            break;
399
        }
400
        ptb = &tb1->page_next[n1];
401
    }
402
}
403

    
404
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
405
{
406
    TranslationBlock *tb1, **ptb;
407
    unsigned int n1;
408

    
409
    ptb = &tb->jmp_next[n];
410
    tb1 = *ptb;
411
    if (tb1) {
412
        /* find tb(n) in circular list */
413
        for(;;) {
414
            tb1 = *ptb;
415
            n1 = (long)tb1 & 3;
416
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
417
            if (n1 == n && tb1 == tb)
418
                break;
419
            if (n1 == 2) {
420
                ptb = &tb1->jmp_first;
421
            } else {
422
                ptb = &tb1->jmp_next[n1];
423
            }
424
        }
425
        /* now we can suppress tb(n) from the list */
426
        *ptb = tb->jmp_next[n];
427

    
428
        tb->jmp_next[n] = NULL;
429
    }
430
}
431

    
432
/* reset the jump entry 'n' of a TB so that it is not chained to
433
   another TB */
434
static inline void tb_reset_jump(TranslationBlock *tb, int n)
435
{
436
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
437
}
438

    
439
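/* unlink the TB from the virtual pc hash table and break every jump
   chained to or from it */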
static inline void tb_invalidate(TranslationBlock *tb)
440
{
441
    unsigned int h, n1;
442
    TranslationBlock *tb1, *tb2, **ptb;
443
    
444
    tb_invalidated_flag = 1;
445

    
446
    /* remove the TB from the hash list */
447
    h = tb_hash_func(tb->pc);
448
    ptb = &tb_hash[h];
449
    for(;;) {
450
        tb1 = *ptb;
451
        /* NOTE: the TB is not necessarily linked in the hash. It
452
           indicates that it is not currently used */
453
        if (tb1 == NULL)
454
            return;
455
        if (tb1 == tb) {
456
            *ptb = tb1->hash_next;
457
            break;
458
        }
459
        ptb = &tb1->hash_next;
460
    }
461

    
462
    /* suppress this TB from the two jump lists */
463
    tb_jmp_remove(tb, 0);
464
    tb_jmp_remove(tb, 1);
465

    
466
    /* suppress any remaining jumps to this TB */
467
    tb1 = tb->jmp_first;
468
    for(;;) {
469
        n1 = (long)tb1 & 3;
470
        if (n1 == 2)
471
            break;
472
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
473
        tb2 = tb1->jmp_next[n1];
474
        tb_reset_jump(tb1, n1);
475
        tb1->jmp_next[n1] = NULL;
476
        tb1 = tb2;
477
    }
478
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
479
}
480

    
481
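/* unlink the TB from the physical hash table and from the per page TB
   lists, then invalidate it. 'page_addr' is the page currently being
   handled by the caller (or -1) */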
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
482
{
483
    PageDesc *p;
484
    unsigned int h;
485
    target_ulong phys_pc;
486
    
487
    /* remove the TB from the hash list */
488
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
489
    h = tb_phys_hash_func(phys_pc);
490
    tb_remove(&tb_phys_hash[h], tb, 
491
              offsetof(TranslationBlock, phys_hash_next));
492

    
493
    /* remove the TB from the page list */
494
    if (tb->page_addr[0] != page_addr) {
495
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
496
        tb_page_remove(&p->first_tb, tb);
497
        invalidate_page_bitmap(p);
498
    }
499
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
500
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
501
        tb_page_remove(&p->first_tb, tb);
502
        invalidate_page_bitmap(p);
503
    }
504

    
505
    tb_invalidate(tb);
506
}
507

    
508
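/* set 'len' bits starting at bit position 'start' in the bit array 'tab' */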
static inline void set_bits(uint8_t *tab, int start, int len)
509
{
510
    int end, mask, end1;
511

    
512
    end = start + len;
513
    tab += start >> 3;
514
    mask = 0xff << (start & 7);
515
    if ((start & ~7) == (end & ~7)) {
516
        if (start < end) {
517
            mask &= ~(0xff << (end & 7));
518
            *tab |= mask;
519
        }
520
    } else {
521
        *tab++ |= mask;
522
        start = (start + 8) & ~7;
523
        end1 = end & ~7;
524
        while (start < end1) {
525
            *tab++ = 0xff;
526
            start += 8;
527
        }
528
        if (start < end) {
529
            mask = ~(0xff << (end & 7));
530
            *tab |= mask;
531
        }
532
    }
533
}
534

    
535
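/* build a bitmap of the bytes of the page covered by translated code so
   that small writes outside of it can avoid a TB invalidation */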
static void build_page_bitmap(PageDesc *p)
536
{
537
    int n, tb_start, tb_end;
538
    TranslationBlock *tb;
539
    
540
    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
541
    if (!p->code_bitmap)
542
        return;
543
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
544

    
545
    tb = p->first_tb;
546
    while (tb != NULL) {
547
        n = (long)tb & 3;
548
        tb = (TranslationBlock *)((long)tb & ~3);
549
        /* NOTE: this is subtle as a TB may span two physical pages */
550
        if (n == 0) {
551
            /* NOTE: tb_end may be after the end of the page, but
552
               it is not a problem */
553
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
554
            tb_end = tb_start + tb->size;
555
            if (tb_end > TARGET_PAGE_SIZE)
556
                tb_end = TARGET_PAGE_SIZE;
557
        } else {
558
            tb_start = 0;
559
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
560
        }
561
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
562
        tb = tb->page_next[n];
563
    }
564
}
565

    
566
#ifdef TARGET_HAS_PRECISE_SMC
567

    
568
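/* translate a new TB at (pc, cs_base, flags) and link it in the
   physical page tables; only used here to regenerate a block when self
   modifying code is detected */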
static void tb_gen_code(CPUState *env, 
569
                        target_ulong pc, target_ulong cs_base, int flags,
570
                        int cflags)
571
{
572
    TranslationBlock *tb;
573
    uint8_t *tc_ptr;
574
    target_ulong phys_pc, phys_page2, virt_page2;
575
    int code_gen_size;
576

    
577
    phys_pc = get_phys_addr_code(env, (unsigned long)pc);
578
    tb = tb_alloc((unsigned long)pc);
579
    if (!tb) {
580
        /* flush must be done */
581
        tb_flush(env);
582
        /* cannot fail at this point */
583
        tb = tb_alloc((unsigned long)pc);
584
    }
585
    tc_ptr = code_gen_ptr;
586
    tb->tc_ptr = tc_ptr;
587
    tb->cs_base = cs_base;
588
    tb->flags = flags;
589
    tb->cflags = cflags;
590
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
591
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
592
    
593
    /* check next page if needed */
594
    virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
595
    phys_page2 = -1;
596
    if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
597
        phys_page2 = get_phys_addr_code(env, virt_page2);
598
    }
599
    tb_link_phys(tb, phys_pc, phys_page2);
600
}
601
#endif
602
    
603
/* invalidate all TBs which intersect with the target physical page
604
   starting in range [start;end[. NOTE: start and end must refer to
605
   the same physical page. 'is_cpu_write_access' should be true if called
606
   from a real cpu write access: the virtual CPU will exit the current
607
   TB if code is modified inside this TB. */
608
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end, 
609
                                   int is_cpu_write_access)
610
{
611
    int n, current_tb_modified, current_tb_not_found, current_flags;
612
    CPUState *env = cpu_single_env;
613
    PageDesc *p;
614
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
615
    target_ulong tb_start, tb_end;
616
    target_ulong current_pc, current_cs_base;
617

    
618
    p = page_find(start >> TARGET_PAGE_BITS);
619
    if (!p) 
620
        return;
621
    if (!p->code_bitmap && 
622
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
623
        is_cpu_write_access) {
624
        /* build code bitmap */
625
        build_page_bitmap(p);
626
    }
627

    
628
    /* we remove all the TBs in the range [start, end[ */
629
    /* XXX: see if in some cases it could be faster to invalidate all the code */
630
    current_tb_not_found = is_cpu_write_access;
631
    current_tb_modified = 0;
632
    current_tb = NULL; /* avoid warning */
633
    current_pc = 0; /* avoid warning */
634
    current_cs_base = 0; /* avoid warning */
635
    current_flags = 0; /* avoid warning */
636
    tb = p->first_tb;
637
    while (tb != NULL) {
638
        n = (long)tb & 3;
639
        tb = (TranslationBlock *)((long)tb & ~3);
640
        tb_next = tb->page_next[n];
641
        /* NOTE: this is subtle as a TB may span two physical pages */
642
        if (n == 0) {
643
            /* NOTE: tb_end may be after the end of the page, but
644
               it is not a problem */
645
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
646
            tb_end = tb_start + tb->size;
647
        } else {
648
            tb_start = tb->page_addr[1];
649
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
650
        }
651
        if (!(tb_end <= start || tb_start >= end)) {
652
#ifdef TARGET_HAS_PRECISE_SMC
653
            if (current_tb_not_found) {
654
                current_tb_not_found = 0;
655
                current_tb = NULL;
656
                if (env->mem_write_pc) {
657
                    /* now we have a real cpu fault */
658
                    current_tb = tb_find_pc(env->mem_write_pc);
659
                }
660
            }
661
            if (current_tb == tb &&
662
                !(current_tb->cflags & CF_SINGLE_INSN)) {
663
                /* If we are modifying the current TB, we must stop
664
                its execution. We could be more precise by checking
665
                that the modification is after the current PC, but it
666
                would require a specialized function to partially
667
                restore the CPU state */
668
                
669
                current_tb_modified = 1;
670
                cpu_restore_state(current_tb, env, 
671
                                  env->mem_write_pc, NULL);
672
#if defined(TARGET_I386)
673
                current_flags = env->hflags;
674
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
675
                current_cs_base = (target_ulong)env->segs[R_CS].base;
676
                current_pc = current_cs_base + env->eip;
677
#else
678
#error unsupported CPU
679
#endif
680
            }
681
#endif /* TARGET_HAS_PRECISE_SMC */
682
            saved_tb = env->current_tb;
683
            env->current_tb = NULL;
684
            tb_phys_invalidate(tb, -1);
685
            env->current_tb = saved_tb;
686
            if (env->interrupt_request && env->current_tb)
687
                cpu_interrupt(env, env->interrupt_request);
688
        }
689
        tb = tb_next;
690
    }
691
#if !defined(CONFIG_USER_ONLY)
692
    /* if no code remaining, no need to continue to use slow writes */
693
    if (!p->first_tb) {
694
        invalidate_page_bitmap(p);
695
        if (is_cpu_write_access) {
696
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
697
        }
698
    }
699
#endif
700
#ifdef TARGET_HAS_PRECISE_SMC
701
    if (current_tb_modified) {
702
        /* we generate a block containing just the instruction
703
           modifying the memory. It will ensure that it cannot modify
704
           itself */
705
        env->current_tb = NULL;
706
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 
707
                    CF_SINGLE_INSN);
708
        cpu_resume_from_signal(env, NULL);
709
    }
710
#endif
711
}
712

    
713
/* len must be <= 8 and start must be a multiple of len */
714
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
715
{
716
    PageDesc *p;
717
    int offset, b;
718
#if 0
719
    if (1) {
720
        if (loglevel) {
721
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n", 
722
                   cpu_single_env->mem_write_vaddr, len, 
723
                   cpu_single_env->eip, 
724
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
725
        }
726
    }
727
#endif
728
    p = page_find(start >> TARGET_PAGE_BITS);
729
    if (!p) 
730
        return;
731
    if (p->code_bitmap) {
732
        offset = start & ~TARGET_PAGE_MASK;
733
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
734
        if (b & ((1 << len) - 1))
735
            goto do_invalidate;
736
    } else {
737
    do_invalidate:
738
        tb_invalidate_phys_page_range(start, start + len, 1);
739
    }
740
}
741

    
742
#if !defined(CONFIG_SOFTMMU)
743
static void tb_invalidate_phys_page(target_ulong addr, 
744
                                    unsigned long pc, void *puc)
745
{
746
    int n, current_flags, current_tb_modified;
747
    target_ulong current_pc, current_cs_base;
748
    PageDesc *p;
749
    TranslationBlock *tb, *current_tb;
750
#ifdef TARGET_HAS_PRECISE_SMC
751
    CPUState *env = cpu_single_env;
752
#endif
753

    
754
    addr &= TARGET_PAGE_MASK;
755
    p = page_find(addr >> TARGET_PAGE_BITS);
756
    if (!p) 
757
        return;
758
    tb = p->first_tb;
759
    current_tb_modified = 0;
760
    current_tb = NULL;
761
    current_pc = 0; /* avoid warning */
762
    current_cs_base = 0; /* avoid warning */
763
    current_flags = 0; /* avoid warning */
764
#ifdef TARGET_HAS_PRECISE_SMC
765
    if (tb && pc != 0) {
766
        current_tb = tb_find_pc(pc);
767
    }
768
#endif
769
    while (tb != NULL) {
770
        n = (long)tb & 3;
771
        tb = (TranslationBlock *)((long)tb & ~3);
772
#ifdef TARGET_HAS_PRECISE_SMC
773
        if (current_tb == tb &&
774
            !(current_tb->cflags & CF_SINGLE_INSN)) {
775
                /* If we are modifying the current TB, we must stop
776
                   its execution. We could be more precise by checking
777
                   that the modification is after the current PC, but it
778
                   would require a specialized function to partially
779
                   restore the CPU state */
780
            
781
            current_tb_modified = 1;
782
            cpu_restore_state(current_tb, env, pc, puc);
783
#if defined(TARGET_I386)
784
            current_flags = env->hflags;
785
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
786
            current_cs_base = (target_ulong)env->segs[R_CS].base;
787
            current_pc = current_cs_base + env->eip;
788
#else
789
#error unsupported CPU
790
#endif
791
        }
792
#endif /* TARGET_HAS_PRECISE_SMC */
793
        tb_phys_invalidate(tb, addr);
794
        tb = tb->page_next[n];
795
    }
796
    p->first_tb = NULL;
797
#ifdef TARGET_HAS_PRECISE_SMC
798
    if (current_tb_modified) {
799
        /* we generate a block containing just the instruction
800
           modifying the memory. It will ensure that it cannot modify
801
           itself */
802
        env->current_tb = NULL;
803
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 
804
                    CF_SINGLE_INSN);
805
        cpu_resume_from_signal(env, puc);
806
    }
807
#endif
808
}
809
#endif
810

    
811
/* add the tb to the target page's TB list and protect the page if necessary */
812
static inline void tb_alloc_page(TranslationBlock *tb, 
813
                                 unsigned int n, unsigned int page_addr)
814
{
815
    PageDesc *p;
816
    TranslationBlock *last_first_tb;
817

    
818
    tb->page_addr[n] = page_addr;
819
    p = page_find(page_addr >> TARGET_PAGE_BITS);
820
    tb->page_next[n] = p->first_tb;
821
    last_first_tb = p->first_tb;
822
    p->first_tb = (TranslationBlock *)((long)tb | n);
823
    invalidate_page_bitmap(p);
824

    
825
#if defined(TARGET_HAS_SMC) || 1
826

    
827
#if defined(CONFIG_USER_ONLY)
828
    if (p->flags & PAGE_WRITE) {
829
        unsigned long host_start, host_end, addr;
830
        int prot;
831

    
832
        /* force the host page as non writable (writes will have a
833
           page fault + mprotect overhead) */
834
        host_start = page_addr & qemu_host_page_mask;
835
        host_end = host_start + qemu_host_page_size;
836
        prot = 0;
837
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
838
            prot |= page_get_flags(addr);
839
        mprotect((void *)host_start, qemu_host_page_size, 
840
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
841
#ifdef DEBUG_TB_INVALIDATE
842
        printf("protecting code page: 0x%08lx\n", 
843
               host_start);
844
#endif
845
        p->flags &= ~PAGE_WRITE;
846
    }
847
#else
848
    /* if some code is already present, then the pages are already
849
       protected. So we handle the case where only the first TB is
850
       allocated in a physical page */
851
    if (!last_first_tb) {
852
        target_ulong virt_addr;
853

    
854
        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
855
        tlb_protect_code(cpu_single_env, virt_addr);        
856
    }
857
#endif
858

    
859
#endif /* TARGET_HAS_SMC */
860
}
861

    
862
/* Allocate a new translation block. Flush the translation buffer if
863
   too many translation blocks or too much generated code. */
864
TranslationBlock *tb_alloc(unsigned long pc)
865
{
866
    TranslationBlock *tb;
867

    
868
    if (nb_tbs >= CODE_GEN_MAX_BLOCKS || 
869
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
870
        return NULL;
871
    tb = &tbs[nb_tbs++];
872
    tb->pc = pc;
873
    tb->cflags = 0;
874
    return tb;
875
}
876

    
877
/* add a new TB and link it to the physical page tables. phys_page2 is
878
   (-1) to indicate that only one page contains the TB. */
879
void tb_link_phys(TranslationBlock *tb, 
880
                  target_ulong phys_pc, target_ulong phys_page2)
881
{
882
    unsigned int h;
883
    TranslationBlock **ptb;
884

    
885
    /* add in the physical hash table */
886
    h = tb_phys_hash_func(phys_pc);
887
    ptb = &tb_phys_hash[h];
888
    tb->phys_hash_next = *ptb;
889
    *ptb = tb;
890

    
891
    /* add in the page list */
892
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
893
    if (phys_page2 != -1)
894
        tb_alloc_page(tb, 1, phys_page2);
895
    else
896
        tb->page_addr[1] = -1;
897
#ifdef DEBUG_TB_CHECK
898
    tb_page_check();
899
#endif
900
}
901

    
902
/* link the tb with the other TBs */
903
void tb_link(TranslationBlock *tb)
904
{
905
#if !defined(CONFIG_USER_ONLY)
906
    {
907
        VirtPageDesc *vp;
908
        target_ulong addr;
909
        
910
        /* save the code memory mappings (needed to invalidate the code) */
911
        addr = tb->pc & TARGET_PAGE_MASK;
912
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
913
#ifdef DEBUG_TLB_CHECK 
914
        if (vp->valid_tag == virt_valid_tag &&
915
            vp->phys_addr != tb->page_addr[0]) {
916
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
917
                   addr, tb->page_addr[0], vp->phys_addr);
918
        }
919
#endif
920
        vp->phys_addr = tb->page_addr[0];
921
        if (vp->valid_tag != virt_valid_tag) {
922
            vp->valid_tag = virt_valid_tag;
923
#if !defined(CONFIG_SOFTMMU)
924
            vp->prot = 0;
925
#endif
926
        }
927
        
928
        if (tb->page_addr[1] != -1) {
929
            addr += TARGET_PAGE_SIZE;
930
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
931
#ifdef DEBUG_TLB_CHECK 
932
            if (vp->valid_tag == virt_valid_tag &&
933
                vp->phys_addr != tb->page_addr[1]) { 
934
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
935
                       addr, tb->page_addr[1], vp->phys_addr);
936
            }
937
#endif
938
            vp->phys_addr = tb->page_addr[1];
939
            if (vp->valid_tag != virt_valid_tag) {
940
                vp->valid_tag = virt_valid_tag;
941
#if !defined(CONFIG_SOFTMMU)
942
                vp->prot = 0;
943
#endif
944
            }
945
        }
946
    }
947
#endif
948

    
949
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
950
    tb->jmp_next[0] = NULL;
951
    tb->jmp_next[1] = NULL;
952
#ifdef USE_CODE_COPY
953
    tb->cflags &= ~CF_FP_USED;
954
    if (tb->cflags & CF_TB_FP_USED)
955
        tb->cflags |= CF_FP_USED;
956
#endif
957

    
958
    /* init original jump addresses */
959
    if (tb->tb_next_offset[0] != 0xffff)
960
        tb_reset_jump(tb, 0);
961
    if (tb->tb_next_offset[1] != 0xffff)
962
        tb_reset_jump(tb, 1);
963
}
964

    
965
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
966
   tb[1].tc_ptr. Return NULL if not found */
967
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
968
{
969
    int m_min, m_max, m;
970
    unsigned long v;
971
    TranslationBlock *tb;
972

    
973
    if (nb_tbs <= 0)
974
        return NULL;
975
    if (tc_ptr < (unsigned long)code_gen_buffer ||
976
        tc_ptr >= (unsigned long)code_gen_ptr)
977
        return NULL;
978
    /* binary search (cf Knuth) */
979
    m_min = 0;
980
    m_max = nb_tbs - 1;
981
    while (m_min <= m_max) {
982
        m = (m_min + m_max) >> 1;
983
        tb = &tbs[m];
984
        v = (unsigned long)tb->tc_ptr;
985
        if (v == tc_ptr)
986
            return tb;
987
        else if (tc_ptr < v) {
988
            m_max = m - 1;
989
        } else {
990
            m_min = m + 1;
991
        }
992
    } 
993
    return &tbs[m_max];
994
}
995

    
996
static void tb_reset_jump_recursive(TranslationBlock *tb);
997

    
998
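/* break the direct jump 'n' of 'tb' and recursively break the jumps of
   the TB it pointed to, so that execution comes back to the main loop */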
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
999
{
1000
    TranslationBlock *tb1, *tb_next, **ptb;
1001
    unsigned int n1;
1002

    
1003
    tb1 = tb->jmp_next[n];
1004
    if (tb1 != NULL) {
1005
        /* find head of list */
1006
        for(;;) {
1007
            n1 = (long)tb1 & 3;
1008
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1009
            if (n1 == 2)
1010
                break;
1011
            tb1 = tb1->jmp_next[n1];
1012
        }
1013
        /* we are now sure that tb jumps to tb1 */
1014
        tb_next = tb1;
1015

    
1016
        /* remove tb from the jmp_first list */
1017
        ptb = &tb_next->jmp_first;
1018
        for(;;) {
1019
            tb1 = *ptb;
1020
            n1 = (long)tb1 & 3;
1021
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1022
            if (n1 == n && tb1 == tb)
1023
                break;
1024
            ptb = &tb1->jmp_next[n1];
1025
        }
1026
        *ptb = tb->jmp_next[n];
1027
        tb->jmp_next[n] = NULL;
1028
        
1029
        /* suppress the jump to next tb in generated code */
1030
        tb_reset_jump(tb, n);
1031

    
1032
        /* suppress jumps in the tb on which we could have jumped */
1033
        tb_reset_jump_recursive(tb_next);
1034
    }
1035
}
1036

    
1037
static void tb_reset_jump_recursive(TranslationBlock *tb)
1038
{
1039
    tb_reset_jump_recursive2(tb, 0);
1040
    tb_reset_jump_recursive2(tb, 1);
1041
}
1042

    
1043
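/* force a retranslation of the code at 'pc' by invalidating any TB
   containing it */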
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1044
{
1045
    target_ulong phys_addr;
1046

    
1047
    phys_addr = cpu_get_phys_page_debug(env, pc);
1048
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
1049
}
1050

    
1051
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1052
   breakpoint is reached */
1053
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1054
{
1055
#if defined(TARGET_I386) || defined(TARGET_PPC)
1056
    int i;
1057
    
1058
    for(i = 0; i < env->nb_breakpoints; i++) {
1059
        if (env->breakpoints[i] == pc)
1060
            return 0;
1061
    }
1062

    
1063
    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1064
        return -1;
1065
    env->breakpoints[env->nb_breakpoints++] = pc;
1066
    
1067
    breakpoint_invalidate(env, pc);
1068
    return 0;
1069
#else
1070
    return -1;
1071
#endif
1072
}
1073

    
1074
/* remove a breakpoint */
1075
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1076
{
1077
#if defined(TARGET_I386) || defined(TARGET_PPC)
1078
    int i;
1079
    for(i = 0; i < env->nb_breakpoints; i++) {
1080
        if (env->breakpoints[i] == pc)
1081
            goto found;
1082
    }
1083
    return -1;
1084
 found:
1085
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
1086
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
1087
    env->nb_breakpoints--;
1088

    
1089
    breakpoint_invalidate(env, pc);
1090
    return 0;
1091
#else
1092
    return -1;
1093
#endif
1094
}
1095

    
1096
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1097
   CPU loop after each instruction */
1098
void cpu_single_step(CPUState *env, int enabled)
1099
{
1100
#if defined(TARGET_I386) || defined(TARGET_PPC)
1101
    if (env->singlestep_enabled != enabled) {
1102
        env->singlestep_enabled = enabled;
1103
        /* must flush all the translated code to avoid inconsistencies */
1104
        /* XXX: only flush what is necessary */
1105
        tb_flush(env);
1106
    }
1107
#endif
1108
}
1109

    
1110
/* enable or disable low level logging */
1111
void cpu_set_log(int log_flags)
1112
{
1113
    loglevel = log_flags;
1114
    if (loglevel && !logfile) {
1115
        logfile = fopen(logfilename, "w");
1116
        if (!logfile) {
1117
            perror(logfilename);
1118
            _exit(1);
1119
        }
1120
#if !defined(CONFIG_SOFTMMU)
1121
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1122
        {
1123
            static uint8_t logfile_buf[4096];
1124
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1125
        }
1126
#else
1127
        setvbuf(logfile, NULL, _IOLBF, 0);
1128
#endif
1129
    }
1130
}
1131

    
1132
void cpu_set_log_filename(const char *filename)
1133
{
1134
    logfilename = strdup(filename);
1135
}
1136

    
1137
/* mask must never be zero, except for A20 change call */
1138
void cpu_interrupt(CPUState *env, int mask)
1139
{
1140
    TranslationBlock *tb;
1141
    static int interrupt_lock;
1142

    
1143
    env->interrupt_request |= mask;
1144
    /* if the cpu is currently executing code, we must unlink it and
1145
       all the potentially executing TB */
1146
    tb = env->current_tb;
1147
    if (tb && !testandset(&interrupt_lock)) {
1148
        env->current_tb = NULL;
1149
        tb_reset_jump_recursive(tb);
1150
        interrupt_lock = 0;
1151
    }
1152
}
1153

    
1154
void cpu_reset_interrupt(CPUState *env, int mask)
1155
{
1156
    env->interrupt_request &= ~mask;
1157
}
1158

    
1159
CPULogItem cpu_log_items[] = {
1160
    { CPU_LOG_TB_OUT_ASM, "out_asm", 
1161
      "show generated host assembly code for each compiled TB" },
1162
    { CPU_LOG_TB_IN_ASM, "in_asm",
1163
      "show target assembly code for each compiled TB" },
1164
    { CPU_LOG_TB_OP, "op", 
1165
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1166
#ifdef TARGET_I386
1167
    { CPU_LOG_TB_OP_OPT, "op_opt",
1168
      "show micro ops after optimization for each compiled TB" },
1169
#endif
1170
    { CPU_LOG_INT, "int",
1171
      "show interrupts/exceptions in short format" },
1172
    { CPU_LOG_EXEC, "exec",
1173
      "show trace before each executed TB (lots of logs)" },
1174
    { CPU_LOG_TB_CPU, "cpu",
1175
      "show CPU state before bloc translation" },
1176
#ifdef TARGET_I386
1177
    { CPU_LOG_PCALL, "pcall",
1178
      "show protected mode far calls/returns/exceptions" },
1179
#endif
1180
    { CPU_LOG_IOPORT, "ioport",
1181
      "show all i/o ports accesses" },
1182
    { 0, NULL, NULL },
1183
};
1184

    
1185
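/* return non zero if the 'n' character string 's1' is equal to 's2' */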
static int cmp1(const char *s1, int n, const char *s2)
1186
{
1187
    if (strlen(s2) != n)
1188
        return 0;
1189
    return memcmp(s1, s2, n) == 0;
1190
}
1191
      
1192
/* takes a comma separated list of log masks. Return 0 if error. */
1193
int cpu_str_to_log_mask(const char *str)
1194
{
1195
    CPULogItem *item;
1196
    int mask;
1197
    const char *p, *p1;
1198

    
1199
    p = str;
1200
    mask = 0;
1201
    for(;;) {
1202
        p1 = strchr(p, ',');
1203
        if (!p1)
1204
            p1 = p + strlen(p);
1205
        for(item = cpu_log_items; item->mask != 0; item++) {
1206
            if (cmp1(p, p1 - p, item->name))
1207
                goto found;
1208
        }
1209
        return 0;
1210
    found:
1211
        mask |= item->mask;
1212
        if (*p1 != ',')
1213
            break;
1214
        p = p1 + 1;
1215
    }
1216
    return mask;
1217
}
1218

    
1219
void cpu_abort(CPUState *env, const char *fmt, ...)
1220
{
1221
    va_list ap;
1222

    
1223
    va_start(ap, fmt);
1224
    fprintf(stderr, "qemu: fatal: ");
1225
    vfprintf(stderr, fmt, ap);
1226
    fprintf(stderr, "\n");
1227
#ifdef TARGET_I386
1228
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
1229
#endif
1230
    va_end(ap);
1231
    abort();
1232
}
1233

    
1234
#if !defined(CONFIG_USER_ONLY)
1235

    
1236
/* NOTE: if flush_global is true, also flush global entries (not
1237
   implemented yet) */
1238
void tlb_flush(CPUState *env, int flush_global)
1239
{
1240
    int i;
1241

    
1242
#if defined(DEBUG_TLB)
1243
    printf("tlb_flush:\n");
1244
#endif
1245
    /* must reset current TB so that interrupts cannot modify the
1246
       links while we are modifying them */
1247
    env->current_tb = NULL;
1248

    
1249
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1250
        env->tlb_read[0][i].address = -1;
1251
        env->tlb_write[0][i].address = -1;
1252
        env->tlb_read[1][i].address = -1;
1253
        env->tlb_write[1][i].address = -1;
1254
    }
1255

    
1256
    virt_page_flush();
1257
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
1258
        tb_hash[i] = NULL;
1259

    
1260
#if !defined(CONFIG_SOFTMMU)
1261
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1262
#endif
1263
}
1264

    
1265
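/* invalidate a single TLB entry if it maps the given virtual address */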
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1266
{
1267
    if (addr == (tlb_entry->address & 
1268
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
1269
        tlb_entry->address = -1;
1270
}
1271

    
1272
void tlb_flush_page(CPUState *env, target_ulong addr)
1273
{
1274
    int i, n;
1275
    VirtPageDesc *vp;
1276
    PageDesc *p;
1277
    TranslationBlock *tb;
1278

    
1279
#if defined(DEBUG_TLB)
1280
    printf("tlb_flush_page: 0x%08x\n", addr);
1281
#endif
1282
    /* must reset current TB so that interrupts cannot modify the
1283
       links while we are modifying them */
1284
    env->current_tb = NULL;
1285

    
1286
    addr &= TARGET_PAGE_MASK;
1287
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1288
    tlb_flush_entry(&env->tlb_read[0][i], addr);
1289
    tlb_flush_entry(&env->tlb_write[0][i], addr);
1290
    tlb_flush_entry(&env->tlb_read[1][i], addr);
1291
    tlb_flush_entry(&env->tlb_write[1][i], addr);
1292

    
1293
    /* remove from the virtual pc hash table all the TB at this
1294
       virtual address */
1295
    
1296
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1297
    if (vp && vp->valid_tag == virt_valid_tag) {
1298
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
1299
        if (p) {
1300
            /* we remove all the links to the TBs in this virtual page */
1301
            tb = p->first_tb;
1302
            while (tb != NULL) {
1303
                n = (long)tb & 3;
1304
                tb = (TranslationBlock *)((long)tb & ~3);
1305
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
1306
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
1307
                    tb_invalidate(tb);
1308
                }
1309
                tb = tb->page_next[n];
1310
            }
1311
        }
1312
        vp->valid_tag = 0;
1313
    }
1314

    
1315
#if !defined(CONFIG_SOFTMMU)
1316
    if (addr < MMAP_AREA_END)
1317
        munmap((void *)addr, TARGET_PAGE_SIZE);
1318
#endif
1319
}
1320

    
1321
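/* redirect the write TLB entry for 'addr' to the IO_MEM_CODE handler so
   that writes to this code page can be detected */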
static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
1322
{
1323
    if (addr == (tlb_entry->address & 
1324
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
1325
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
1326
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
1327
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
1328
    }
1329
}
1330

    
1331
/* update the TLBs so that writes to code in the virtual page 'addr'
1332
   can be detected */
1333
static void tlb_protect_code(CPUState *env, target_ulong addr)
1334
{
1335
    int i;
1336

    
1337
    addr &= TARGET_PAGE_MASK;
1338
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1339
    tlb_protect_code1(&env->tlb_write[0][i], addr);
1340
    tlb_protect_code1(&env->tlb_write[1][i], addr);
1341
#if !defined(CONFIG_SOFTMMU)
1342
    /* NOTE: as we generated the code for this page, it is already at
1343
       least readable */
1344
    if (addr < MMAP_AREA_END)
1345
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
1346
#endif
1347
}
1348

    
1349
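/* switch a write TLB entry from the IO_MEM_CODE handler back to
   IO_MEM_NOTDIRTY once the physical page no longer contains code */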
static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry, 
1350
                                       unsigned long phys_addr)
1351
{
1352
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
1353
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
1354
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1355
    }
1356
}
1357

    
1358
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1359
   tested for self modifying code */
1360
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
1361
{
1362
    int i;
1363

    
1364
    phys_addr &= TARGET_PAGE_MASK;
1365
    phys_addr += (long)phys_ram_base;
1366
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1367
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
1368
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
1369
}
1370

    
1371
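/* redirect a RAM write TLB entry to the IO_MEM_NOTDIRTY slow path if its
   host address lies in [start, start + length) */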
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, 
1372
                                         unsigned long start, unsigned long length)
1373
{
1374
    unsigned long addr;
1375
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1376
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1377
        if ((addr - start) < length) {
1378
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1379
        }
1380
    }
1381
}
1382

    
1383
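/* clear the dirty bits of the physical range [start, end) and make the
   corresponding write TLB entries go through the slow path again so that
   the bits are set on the next store */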
void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
1384
{
1385
    CPUState *env;
1386
    unsigned long length, start1;
1387
    int i;
1388

    
1389
    start &= TARGET_PAGE_MASK;
1390
    end = TARGET_PAGE_ALIGN(end);
1391

    
1392
    length = end - start;
1393
    if (length == 0)
1394
        return;
1395
    memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);
1396

    
1397
    env = cpu_single_env;
1398
    /* we modify the TLB cache so that the dirty bit will be set again
1399
       when accessing the range */
1400
    start1 = start + (unsigned long)phys_ram_base;
1401
    for(i = 0; i < CPU_TLB_SIZE; i++)
1402
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
1403
    for(i = 0; i < CPU_TLB_SIZE; i++)
1404
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
1405

    
1406
#if !defined(CONFIG_SOFTMMU)
1407
    /* XXX: this is expensive */
1408
    {
1409
        VirtPageDesc *p;
1410
        int j;
1411
        target_ulong addr;
1412

    
1413
        for(i = 0; i < L1_SIZE; i++) {
1414
            p = l1_virt_map[i];
1415
            if (p) {
1416
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
1417
                for(j = 0; j < L2_SIZE; j++) {
1418
                    if (p->valid_tag == virt_valid_tag &&
1419
                        p->phys_addr >= start && p->phys_addr < end &&
1420
                        (p->prot & PROT_WRITE)) {
1421
                        if (addr < MMAP_AREA_END) {
1422
                            mprotect((void *)addr, TARGET_PAGE_SIZE, 
1423
                                     p->prot & ~PROT_WRITE);
1424
                        }
1425
                    }
1426
                    addr += TARGET_PAGE_SIZE;
1427
                    p++;
1428
                }
1429
            }
1430
        }
1431
    }
1432
#endif
1433
}
1434

    
1435
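/* restore direct RAM access for an IO_MEM_NOTDIRTY write TLB entry that
   maps the given host address */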
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, 
1436
                                    unsigned long start)
1437
{
1438
    unsigned long addr;
1439
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1440
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1441
        if (addr == start) {
1442
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
1443
        }
1444
    }
1445
}
1446

    
1447
/* update the TLB corresponding to virtual page vaddr and phys addr
1448
   addr so that it is no longer dirty */
1449
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
1450
{
1451
    CPUState *env = cpu_single_env;
1452
    int i;
1453

    
1454
    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;
1455

    
1456
    addr &= TARGET_PAGE_MASK;
1457
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1458
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
1459
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
1460
}
1461

    
1462
/* add a new TLB entry. At most one entry for a given virtual address
1463
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1464
   (can only happen in non SOFTMMU mode for I/O pages or pages
1465
   conflicting with the host address space). */
1466
int tlb_set_page(CPUState *env, target_ulong vaddr, 
1467
                 target_phys_addr_t paddr, int prot, 
1468
                 int is_user, int is_softmmu)
1469
{
1470
    PhysPageDesc *p;
1471
    unsigned long pd;
1472
    TranslationBlock *first_tb;
1473
    unsigned int index;
1474
    target_ulong address;
1475
    unsigned long addend;
1476
    int ret;
1477

    
1478
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1479
    first_tb = NULL;
1480
    if (!p) {
1481
        pd = IO_MEM_UNASSIGNED;
1482
    } else {
1483
        PageDesc *p1;
1484
        pd = p->phys_offset;
1485
        if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1486
            /* NOTE: we also allocate the page at this stage */
1487
            p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
1488
            first_tb = p1->first_tb;
1489
        }
1490
    }
1491
#if defined(DEBUG_TLB)
1492
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
1493
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
1494
#endif
1495

    
1496
    ret = 0;
1497
#if !defined(CONFIG_SOFTMMU)
1498
    if (is_softmmu) 
1499
#endif
1500
    {
1501
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1502
            /* IO memory case */
1503
            address = vaddr | pd;
1504
            addend = paddr;
1505
        } else {
1506
            /* standard memory */
1507
            address = vaddr;
1508
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1509
        }
1510
        
1511
        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
1512
        addend -= vaddr;
1513
        if (prot & PAGE_READ) {
1514
            env->tlb_read[is_user][index].address = address;
1515
            env->tlb_read[is_user][index].addend = addend;
1516
        } else {
1517
            env->tlb_read[is_user][index].address = -1;
1518
            env->tlb_read[is_user][index].addend = -1;
1519
        }
1520
        if (prot & PAGE_WRITE) {
1521
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
1522
                /* ROM: access is ignored (same as unassigned) */
1523
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
1524
                env->tlb_write[is_user][index].addend = addend;
1525
            } else 
1526
                /* XXX: the PowerPC code seems not ready to handle
1527
                   self modifying code with DCBI */
1528
#if defined(TARGET_HAS_SMC) || 1
1529
            if (first_tb) {
1530
                /* if code is present, we use a specific memory
1531
                   handler. It works only for physical memory access */
1532
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
1533
                env->tlb_write[is_user][index].addend = addend;
1534
            } else 
1535
#endif
1536
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && 
1537
                       !cpu_physical_memory_is_dirty(pd)) {
1538
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
1539
                env->tlb_write[is_user][index].addend = addend;
1540
            } else {
1541
                env->tlb_write[is_user][index].address = address;
1542
                env->tlb_write[is_user][index].addend = addend;
1543
            }
1544
        } else {
1545
            env->tlb_write[is_user][index].address = -1;
1546
            env->tlb_write[is_user][index].addend = -1;
1547
        }
1548
    }
1549
#if !defined(CONFIG_SOFTMMU)
1550
    else {
1551
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1552
            /* IO access: no mapping is done as it will be handled by the
1553
               soft MMU */
1554
            if (!(env->hflags & HF_SOFTMMU_MASK))
1555
                ret = 2;
1556
        } else {
1557
            void *map_addr;
1558

    
1559
            if (vaddr >= MMAP_AREA_END) {
1560
                ret = 2;
1561
            } else {
1562
                if (prot & PROT_WRITE) {
1563
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || 
1564
#if defined(TARGET_HAS_SMC) || 1
1565
                        first_tb ||
1566
#endif
1567
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && 
1568
                         !cpu_physical_memory_is_dirty(pd))) {
1569
                        /* ROM: we do as if code was inside */
1570
                        /* if code is present, we only map as read only and save the
1571
                           original mapping */
1572
                        VirtPageDesc *vp;
1573
                        
1574
                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
1575
                        vp->phys_addr = pd;
1576
                        vp->prot = prot;
1577
                        vp->valid_tag = virt_valid_tag;
1578
                        prot &= ~PAGE_WRITE;
1579
                    }
1580
                }
1581
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot, 
1582
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1583
                if (map_addr == MAP_FAILED) {
1584
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1585
                              paddr, vaddr);
1586
                }
1587
            }
1588
        }
1589
    }
1590
#endif
1591
    return ret;
1592
}
1593

    
1594
/* called from signal handler: invalidate the code and unprotect the
1595
   page. Return TRUE if the fault was successfully handled. */
1596
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
1597
{
1598
#if !defined(CONFIG_SOFTMMU)
1599
    VirtPageDesc *vp;
1600

    
1601
#if defined(DEBUG_TLB)
1602
    printf("page_unprotect: addr=0x%08x\n", addr);
1603
#endif
1604
    addr &= TARGET_PAGE_MASK;
1605

    
1606
    /* if it is not mapped, no need to worry here */
1607
    if (addr >= MMAP_AREA_END)
1608
        return 0;
1609
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1610
    if (!vp)
1611
        return 0;
1612
    /* NOTE: in this case, valid_tag is _not_ tested as it
1613
       validates only the code TLB */
1614
    if (vp->valid_tag != virt_valid_tag)
1615
        return 0;
1616
    if (!(vp->prot & PAGE_WRITE))
1617
        return 0;
1618
#if defined(DEBUG_TLB)
1619
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n", 
1620
           addr, vp->phys_addr, vp->prot);
1621
#endif
1622
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1623
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1624
                  (unsigned long)addr, vp->prot);
1625
    /* set the dirty bit */
1626
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
1627
    /* flush the code inside */
1628
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1629
    return 1;
1630
#else
1631
    return 0;
1632
#endif
1633
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr, 
                 target_phys_addr_t paddr, int prot, 
                 int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start, 
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
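
/* Example output (illustrative only; the actual ranges depend on the guest
   image that was loaded):

       start    end      size     prot
       00008000-00009000 00001000 r-x
       40000000-40002000 00002000 rw-
*/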

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the page was write protected and is now being made
           writable, we must invalidate the code inside */
        if (!(p->flags & PAGE_WRITE) && 
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
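
/* Illustrative sketch: the user mode emulation of mmap()/mprotect() is
   expected to keep this table in sync, e.g. after successfully mapping
   'len' writable bytes at 'start':

       page_set_flags(start, start + len,
                      PAGE_VALID | PAGE_READ | PAGE_WRITE);

   'start' and 'len' are made up for the example; the PAGE_* flags are the
   ones tested by page_get_flags() callers such as cpu_physical_memory_rw()
   below. */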

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;
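
    /* NOTE: mprotect() can only change protection with host page
       granularity, and the host page size may be larger than
       TARGET_PAGE_SIZE; we therefore OR together the flags of every
       target page contained in the host page before deciding what to
       restore. */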

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size, 
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when a system call directly modifies a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr, 
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    unsigned long addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
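
/* Illustrative sketch: a machine init function would typically register its
   guest RAM with a page aligned offset into phys_ram_base, and register
   device memory with the handle obtained from cpu_register_io_memory(),
   e.g.

       cpu_register_physical_memory(0x00000000, ram_size, ram_offset);
       cpu_register_physical_memory(0xfc000000, 0x1000, my_io_index);

   where 'ram_offset' has its low bits clear and 'my_io_index' is a value
   returned by cpu_register_io_memory(); the addresses above are made up
   for the example. */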

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

/* self modifying code support in soft mmu mode: writing to a page
   containing code comes to these functions */

static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 1);
#endif
    stb_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2);
#endif
    stw_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4);
#endif
    stl_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stb_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stw_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stl_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
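
/* NOTE: IO_MEM_NOTDIRTY is installed for RAM pages whose dirty flag is
   clear: the handlers above perform the store and then call
   tlb_set_dirty() so that later writes to the same page can take the
   fast path instead of coming back here. */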

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
}
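
/* NOTE: io_mem_nb starts at 5 so that zones allocated later by
   cpu_register_io_memory() cannot collide with the built-in entries
   registered above (presumably index 0 is the implicit RAM entry,
   IO_MEM_RAM). */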

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is positive, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
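
/* Illustrative sketch: a device model registers its access callbacks and
   then maps them at a guest physical address, e.g.

       static CPUReadMemoryFunc *my_dev_read[3] = {
           my_dev_readb, my_dev_readw, my_dev_readl,
       };
       static CPUWriteMemoryFunc *my_dev_write[3] = {
           my_dev_writeb, my_dev_writew, my_dev_writel,
       };

       int iomemtype = cpu_register_io_memory(0, my_dev_read,
                                              my_dev_write, dev_state);
       cpu_register_physical_memory(0xfc000000, 0x1000, iomemtype);

   The my_dev_* callbacks, the 'dev_state' opaque pointer and the address
   are made up for the example. */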

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, 
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, 
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_raw(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_raw(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_raw(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                /* set dirty bit */
                phys_ram_dirty[page >> TARGET_PAGE_BITS] = 1;
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_raw(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_raw(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_raw(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + 
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#endif
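
/* Illustrative sketch: device emulation code can use
   cpu_physical_memory_rw() for DMA style transfers, e.g. to fetch a disk
   sector that the guest placed at guest physical address 'dma_addr':

       uint8_t sector[512];
       cpu_physical_memory_rw(dma_addr, sector, sizeof(sector), 0);

   'dma_addr' and the buffer are made up for the example; the last argument
   selects whether guest memory is read (0) or written (1). */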

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr, 
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK), 
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
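
/* Illustrative sketch: a debugger front end (such as the gdb stub) can read
   guest virtual memory through this helper, e.g. to fetch the bytes at the
   current x86 program counter:

       uint8_t insn[16];
       cpu_memory_rw_debug(env, env->eip, insn, sizeof(insn), 0);

   Other targets would use their own program counter field instead of
   env->eip. */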

#if !defined(CONFIG_USER_ONLY) 

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif
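
/* NOTE: with MMUSUFFIX defined as _cmmu, the template expansions above
   generate the slow path helpers used for code (instruction) fetches
   rather than data accesses; GETPC() is NULL and env is forced to
   cpu_single_env because no translated code is running when the
   translator fetches guest instructions. */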