/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>
#if !defined(CONFIG_SOFTMMU)
#include <sys/mman.h>
#endif

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
} PhysPageDesc;

typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
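
/* The page maps are two-level: a page index is split into a top
   L1_BITS part selecting an entry of l1_map/l1_phys_map/l1_virt_map
   and a low L2_BITS part indexing a second-level array of L2_SIZE
   descriptors.  Second-level arrays are only allocated on first use by
   the *_find_alloc() helpers; the plain *_find() helpers return NULL
   when no descriptor exists yet.  With 4 KB target pages
   (TARGET_PAGE_BITS == 12) this is a 10/10 split of the 20-bit page
   number. */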

static void io_mem_init(void);

unsigned long real_host_page_size;
unsigned long host_page_bits;
unsigned long host_page_size;
unsigned long host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc *l1_phys_map[L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
static VirtPageDesc *l1_virt_map[L1_SIZE];
static unsigned int virt_valid_tag;
#endif

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

static void page_init(void)
{
    /* NOTE: we can always suppose that host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    real_host_page_size = 4096;
#else
    real_host_page_size = getpagesize();
#endif
    if (host_page_size == 0)
        host_page_size = real_host_page_size;
    if (host_page_size < TARGET_PAGE_SIZE)
        host_page_size = TARGET_PAGE_SIZE;
    host_page_bits = 0;
    while ((1 << host_page_bits) < host_page_size)
        host_page_bits++;
    host_page_mask = ~(host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
}
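
/* Example: on a host with 4096-byte pages and host_page_size left at 0,
   page_init() ends up with host_page_size == 4096, host_page_bits == 12
   and host_page_mask == ~4095UL (0xfffff000 on a 32-bit host). */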

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
{
    PhysPageDesc **lp, *p;

    lp = &l1_phys_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(unsigned int index)
{
    PhysPageDesc *p;

    p = l1_phys_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, target_ulong addr);
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);

static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
{
    VirtPageDesc **lp, *p;

    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline VirtPageDesc *virt_page_find(unsigned int index)
{
    VirtPageDesc *p;

    p = l1_virt_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static void virt_page_flush(void)
{
    int i, j;
    VirtPageDesc *p;

    virt_valid_tag++;

    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                for(j = 0; j < L2_SIZE; j++)
                    p[j].valid_tag = 0;
            }
        }
    }
}
#else
static void virt_page_flush(void)
{
}
#endif

void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
    int i;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n", 
           code_gen_ptr - code_gen_buffer, 
           nb_tbs, 
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;
    virt_page_flush();

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++)
        tb_phys_hash[i] = NULL;
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
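
/* Most TB list pointers below carry a tag in their two low bits: in
   'first_tb'/'page_next' the tag (0 or 1) records whether the link
   belongs to the first or the second physical page spanned by the TB;
   in the 'jmp_first'/'jmp_next' circular lists the tag selects the jump
   slot and the value 2 marks the list head (the TB itself), which is
   why the loops mask with ~3 and stop when the tag equals 2. */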

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb, 
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
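
/* set_bits() sets the 'len' bits [start, start + len) in the bitmap
   'tab' (bit k of the bitmap is bit (k & 7) of byte k >> 3).  For
   example, set_bits(tab, 3, 6) sets bits 3..7 of tab[0] and bit 0 of
   tab[1]. */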

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
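
/* The code bitmap has one bit per byte of the target page; a set bit
   means that byte is covered by at least one TB.  It is only built for
   pages that are written often (code_write_count reaches
   SMC_BITMAP_USE_THRESHOLD), so that tb_invalidate_phys_page_fast() can
   cheaply decide whether a write actually touches translated code. */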

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env, 
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, (unsigned long)pc);
    tb = tb_alloc((unsigned long)pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc((unsigned long)pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end, 
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
#if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
    CPUState *env = cpu_single_env;
#endif
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) 
        return;
    if (!p->code_bitmap && 
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, 
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            saved_tb = env->current_tb;
            env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n", 
                   cpu_single_env->mem_write_vaddr, len, 
                   cpu_single_env->eip, 
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) 
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
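
/* Fast path for small guest writes: when a code bitmap exists, the
   'len' bits at the write offset are tested and the expensive
   tb_invalidate_phys_page_range() is only called if the written bytes
   actually overlap translated code; without a bitmap, every write to
   the page is treated as a potential code modification. */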

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr, 
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) 
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb, 
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#ifdef TARGET_HAS_SMC

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & host_page_mask;
        host_end = host_start + host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, host_page_size, 
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n", 
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS || 
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
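
/* Note that tb_alloc() itself never flushes: it returns NULL when the
   TB array or the code buffer is full, and the caller is expected to
   call tb_flush() and then retry, as tb_gen_code() does above. */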

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb, 
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK 
        if (vp->valid_tag == virt_valid_tag &&
            vp->phys_addr != tb->page_addr[0]) {
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                   addr, tb->page_addr[0], vp->phys_addr);
        }
#endif
        vp->phys_addr = tb->page_addr[0];
        if (vp->valid_tag != virt_valid_tag) {
            vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
            vp->prot = 0;
#endif
        }

        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK 
            if (vp->valid_tag == virt_valid_tag &&
                vp->phys_addr != tb->page_addr[1]) { 
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                       addr, tb->page_addr[1], vp->phys_addr);
            }
#endif
            vp->phys_addr = tb->page_addr[1];
            if (vp->valid_tag != virt_valid_tag) {
                vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
                vp->prot = 0;
#endif
            }
        }
    }
#endif

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
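
/* tb_find_pc() relies on TBs being allocated sequentially from 'tbs'
   and their generated code sequentially from code_gen_buffer, so tbs[]
   is sorted by tc_ptr and a binary search can map a host PC inside
   generated code back to its TranslationBlock (used to reconstruct the
   guest CPU state after a fault). */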

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}
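
/* Unlinking the chained jumps of the currently executing TB makes the
   generated code fall back to the main execution loop at the next block
   boundary, so the pending interrupt_request is noticed without having
   to interrupt the host thread. */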

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm", 
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op", 
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o port accesses" },
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        for(item = cpu_log_items; item->mask != 0; item++) {
            if (cmp1(p, p1 - p, item->name))
                goto found;
        }
        return 0;
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
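
/* Example: cpu_str_to_log_mask("in_asm,op") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, while an unknown name anywhere in
   the list makes the whole call return 0. */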

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    virt_page_flush();
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address & 
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TB at this
       virtual address */

    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
}

static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address & 
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, target_ulong addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}

static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry, 
                                       unsigned long phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
{
    int i;

    phys_addr &= TARGET_PAGE_MASK;
    phys_addr += (long)phys_ram_base;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}
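
/* Self-modifying-code detection in softmmu mode: tlb_protect_code()
   retags the write TLB entries of a page that contains translated code
   with IO_MEM_CODE, so guest stores to it take the slow I/O path where
   the affected TBs can be invalidated; once a page holds no more code,
   tlb_unprotect_code_phys() downgrades the entry to IO_MEM_NOTDIRTY so
   only dirty tracking remains. */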

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, 
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);

    env = cpu_single_env;
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE, 
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, 
                                    unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}
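
/* Dirty RAM tracking: cpu_physical_memory_reset_dirty() clears the
   per-page bytes in phys_ram_dirty and retags the matching write TLB
   entries as IO_MEM_NOTDIRTY, forcing the next store to such a page
   through the slow path; that store path uses tlb_set_dirty(), which
   sets the dirty byte again and restores the fast IO_MEM_RAM mapping. */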

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr, 
                 target_phys_addr_t paddr, int prot, 
                 int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    TranslationBlock *first_tb;
    unsigned int index;
    target_ulong address;
    unsigned long addend;
    int ret;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    first_tb = NULL;
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        PageDesc *p1;
        pd = p->phys_offset;
        if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
            /* NOTE: we also allocate the page at this stage */
            p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
            first_tb = p1->first_tb;
        }
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu) 
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else 
                /* XXX: the PowerPC code seems not ready to handle
                   self modifying code with DCBI */
#if defined(TARGET_HAS_SMC) || 1
            if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend;
            } else 
#endif
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && 
                       !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || 
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && 
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot, 
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
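
/* For RAM pages the TLB 'addend' is computed so that
   host_address == guest_vaddr + addend (phys_ram_base plus the physical
   page offset, minus vaddr); for I/O pages the io_index is kept in the
   low bits of 'address' so the access is routed to the
   io_mem_read/io_mem_write handlers instead. */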

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n", 
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}
1636

    
1637
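/* Illustrative sketch only (the names below are hypothetical, not part of
   this file): the host SIGSEGV handler that QEMU installs elsewhere is
   expected to extract the faulting address, the host PC and the signal
   context and hand them to page_unprotect(); a non-zero return means the
   page has been made writable again and the trapped store can simply be
   restarted. */
static int example_handle_write_fault(unsigned long fault_addr,
                                      unsigned long host_pc, void *sigcontext)
{
    /* returns 1 if the fault was caused by our own write protection */
    return page_unprotect(fault_addr, host_pc, sigcontext);
}
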
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if write access is being enabled on a page that was
           write protected and contains code, we invalidate that code */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

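/* Illustrative sketch only (hypothetical helper, not part of this file):
   a user-mode mmap emulation would publish a fresh guest mapping like this;
   PAGE_WRITE_ORG is added by page_set_flags() itself when PAGE_WRITE is
   requested. */
static void example_register_guest_mapping(unsigned long guest_addr,
                                           unsigned long len, int writable)
{
    int flags = PAGE_VALID | PAGE_READ | PAGE_EXEC;

    if (writable)
        flags |= PAGE_WRITE;
    page_set_flags(guest_addr, guest_addr + len, flags);
}
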
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

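/* Illustrative sketch only (hypothetical helper, not part of this file):
   a syscall emulation path would call page_unprotect_range() before letting
   the host kernel write into guest memory, so that write-protected code
   pages are unprotected and their translated blocks are invalidated first. */
static long example_emulate_read(int fd, uint8_t *guest_buf, unsigned long count)
{
    page_unprotect_range(guest_buf, count);
    return read(fd, guest_buf, count);
}
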
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    unsigned long addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}

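/* Illustrative sketch only (hypothetical board setup; the addresses and the
   'dev_io_index' parameter are invented for the example): map RAM at guest
   physical address 0 and one page of device registers whose handlers were
   previously registered with cpu_register_io_memory(). */
static void example_board_map(unsigned long ram_size, int dev_io_index)
{
    /* phys_offset for RAM is the offset inside phys_ram_base, tagged IO_MEM_RAM */
    cpu_register_physical_memory(0x00000000, ram_size, 0 | IO_MEM_RAM);
    /* phys_offset for I/O is the tag returned by cpu_register_io_memory() */
    cpu_register_physical_memory(0xfe000000, TARGET_PAGE_SIZE, dev_io_index);
}
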
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

/* self modifying code support in soft mmu mode: writing to a page
   containing code comes to these functions */

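/* These handlers are registered for the IO_MEM_CODE slot in io_mem_init()
   below: a store that hits a page tagged IO_MEM_CODE first invalidates the
   translated blocks covering the written bytes, then performs the raw store
   and marks the page dirty again. */
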
static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 1);
#endif
    stb_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2);
#endif
    stw_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4);
#endif
    stl_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stb_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stw_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stl_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}

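/* Illustrative sketch only (the 'my_dev_*' names are invented, not part of
   this file): a device supplies one handler per access size, registers them,
   and passes the returned tag to cpu_register_physical_memory(). */
static uint32_t my_dev_readb(void *opaque, target_phys_addr_t addr)
{
    return 0; /* device register read */
}

static void my_dev_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* device register write */
}

static CPUReadMemoryFunc *my_dev_read[3] = {
    my_dev_readb, my_dev_readb, my_dev_readb,
};

static CPUWriteMemoryFunc *my_dev_write[3] = {
    my_dev_writeb, my_dev_writeb, my_dev_writeb,
};

static int example_register_my_dev(void *dev_state)
{
    /* io_index 0 requests a new slot; the return value already has
       IO_MEM_SHIFT applied and can be used as a phys_offset */
    return cpu_register_io_memory(0, my_dev_read, my_dev_write, dev_state);
}
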
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_raw(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_raw(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_raw(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                /* set dirty bit */
                phys_ram_dirty[page >> TARGET_PAGE_BITS] = 1;
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_raw(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_raw(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_raw(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#endif

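/* Illustrative sketch only (hypothetical helper, not part of this file):
   read a 32-bit value from guest physical memory through the slow path. */
static uint32_t example_physical_read_l(target_phys_addr_t addr)
{
    uint8_t buf[4];

    cpu_physical_memory_rw(addr, buf, 4, 0); /* is_write = 0 */
    return ldl_raw(buf);
}
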
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

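/* Illustrative sketch only (hypothetical caller such as a debugger stub):
   fetch guest memory by virtual address, failing if a page is unmapped. */
static int example_read_guest_virt(CPUState *env, target_ulong vaddr,
                                   uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0); /* is_write = 0 */
}
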
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env

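/* Each include below instantiates the helpers from softmmu_template.h for
   one access size: SHIFT 0 = 8 bit, 1 = 16 bit, 2 = 32 bit, 3 = 64 bit.
   With MMUSUFFIX set to _cmmu these are the code-access variants used when
   the translator fetches target instructions. */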
#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif