root / exec.c @ 7fe48483

1
/*
2
 *  virtual page mapping and translated block handling
3
 * 
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include "config.h"
21
#ifdef _WIN32
22
#include <windows.h>
23
#else
24
#include <sys/mman.h>
25
#endif
26
#include <stdlib.h>
27
#include <stdio.h>
28
#include <stdarg.h>
29
#include <string.h>
30
#include <errno.h>
31
#include <unistd.h>
32
#include <inttypes.h>
33

    
34
#include "cpu.h"
35
#include "exec-all.h"
36

    
37
//#define DEBUG_TB_INVALIDATE
38
//#define DEBUG_FLUSH
39
//#define DEBUG_TLB
40

    
41
/* make various TB consistency checks */
42
//#define DEBUG_TB_CHECK 
43
//#define DEBUG_TLB_CHECK 
44

    
45
/* threshold to flush the translated code buffer */
46
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
47

    
48
#define SMC_BITMAP_USE_THRESHOLD 10
49

    
50
#define MMAP_AREA_START        0x00000000
51
#define MMAP_AREA_END          0xa8000000
52

    
53
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
54
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
55
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
56
int nb_tbs;
57
/* any access to the tbs or the page table must use this lock */
58
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
59

    
60
uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
61
uint8_t *code_gen_ptr;
62

    
63
int phys_ram_size;
64
int phys_ram_fd;
65
uint8_t *phys_ram_base;
66
uint8_t *phys_ram_dirty;
67

    
68
typedef struct PageDesc {
69
    /* list of TBs intersecting this ram page */
70
    TranslationBlock *first_tb;
71
    /* in order to optimize self modifying code, we count the number
72
       of lookups we do to a given page to use a bitmap */
73
    unsigned int code_write_count;
74
    uint8_t *code_bitmap;
75
#if defined(CONFIG_USER_ONLY)
76
    unsigned long flags;
77
#endif
78
} PageDesc;
79

    
80
typedef struct PhysPageDesc {
81
    /* offset in host memory of the page + io_index in the low 12 bits */
82
    unsigned long phys_offset;
83
} PhysPageDesc;
84

    
85
typedef struct VirtPageDesc {
86
    /* physical address of code page. It is valid only if 'valid_tag'
87
       matches 'virt_valid_tag' */ 
88
    target_ulong phys_addr; 
89
    unsigned int valid_tag;
90
#if !defined(CONFIG_SOFTMMU)
91
    /* original page access rights. It is valid only if 'valid_tag'
92
       matches 'virt_valid_tag' */
93
    unsigned int prot;
94
#endif
95
} VirtPageDesc;
96

    
97
#define L2_BITS 10
98
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
99

    
100
#define L1_SIZE (1 << L1_BITS)
101
#define L2_SIZE (1 << L2_BITS)
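/* Editor's sketch (not part of the original file): how the two-level maps
   declared below are indexed.  A target page number, (addr >> TARGET_PAGE_BITS),
   selects an l1_map slot with its top L1_BITS and an entry inside the
   second-level array with its low L2_BITS.  The example_* name is made up. */
#if 0
static inline void example_split_page_index(target_ulong addr,
                                            unsigned int *l1_index,
                                            unsigned int *l2_index)
{
    unsigned int page_index = addr >> TARGET_PAGE_BITS; /* target page number */
    *l1_index = page_index >> L2_BITS;                   /* slot in the level 1 map */
    *l2_index = page_index & (L2_SIZE - 1);              /* slot in the level 2 array */
}
#endif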
102

    
103
static void io_mem_init(void);
104

    
105
unsigned long qemu_real_host_page_size;
106
unsigned long qemu_host_page_bits;
107
unsigned long qemu_host_page_size;
108
unsigned long qemu_host_page_mask;
109

    
110
/* XXX: for system emulation, it could just be an array */
111
static PageDesc *l1_map[L1_SIZE];
112
static PhysPageDesc *l1_phys_map[L1_SIZE];
113

    
114
#if !defined(CONFIG_USER_ONLY)
115
static VirtPageDesc *l1_virt_map[L1_SIZE];
116
static unsigned int virt_valid_tag;
117
#endif
118

    
119
/* io memory support */
120
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
121
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
122
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
123
static int io_mem_nb;
124

    
125
/* log support */
126
char *logfilename = "/tmp/qemu.log";
127
FILE *logfile;
128
int loglevel;
129

    
130
static void page_init(void)
131
{
132
    /* NOTE: we can always suppose that qemu_host_page_size >=
133
       TARGET_PAGE_SIZE */
134
#ifdef _WIN32
135
    {
136
        SYSTEM_INFO system_info;
137
        DWORD old_protect;
138
        
139
        GetSystemInfo(&system_info);
140
        qemu_real_host_page_size = system_info.dwPageSize;
141
        
142
        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
143
                       PAGE_EXECUTE_READWRITE, &old_protect);
144
    }
145
#else
146
    qemu_real_host_page_size = getpagesize();
147
    {
148
        unsigned long start, end;
149

    
150
        start = (unsigned long)code_gen_buffer;
151
        start &= ~(qemu_real_host_page_size - 1);
152
        
153
        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
154
        end += qemu_real_host_page_size - 1;
155
        end &= ~(qemu_real_host_page_size - 1);
156
        
157
        mprotect((void *)start, end - start, 
158
                 PROT_READ | PROT_WRITE | PROT_EXEC);
159
    }
160
#endif
161

    
162
    if (qemu_host_page_size == 0)
163
        qemu_host_page_size = qemu_real_host_page_size;
164
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
165
        qemu_host_page_size = TARGET_PAGE_SIZE;
166
    qemu_host_page_bits = 0;
167
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
168
        qemu_host_page_bits++;
169
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
170
#if !defined(CONFIG_USER_ONLY)
171
    virt_valid_tag = 1;
172
#endif
173
}
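/* Editor's sketch (not part of the original file): after page_init(),
   qemu_host_page_size is at least TARGET_PAGE_SIZE, so rounding with
   qemu_host_page_mask yields host-page-aligned ranges suitable for
   mprotect(), as done later in tb_alloc_page().  The example_* name is
   made up. */
#if 0
static inline unsigned long example_host_page_start(unsigned long addr)
{
    return addr & qemu_host_page_mask;   /* start of the enclosing host page */
}
#endif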
174

    
175
static inline PageDesc *page_find_alloc(unsigned int index)
176
{
177
    PageDesc **lp, *p;
178

    
179
    lp = &l1_map[index >> L2_BITS];
180
    p = *lp;
181
    if (!p) {
182
        /* allocate if not found */
183
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
184
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
185
        *lp = p;
186
    }
187
    return p + (index & (L2_SIZE - 1));
188
}
189

    
190
static inline PageDesc *page_find(unsigned int index)
191
{
192
    PageDesc *p;
193

    
194
    p = l1_map[index >> L2_BITS];
195
    if (!p)
196
        return 0;
197
    return p + (index & (L2_SIZE - 1));
198
}
199

    
200
static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
201
{
202
    PhysPageDesc **lp, *p;
203

    
204
    lp = &l1_phys_map[index >> L2_BITS];
205
    p = *lp;
206
    if (!p) {
207
        /* allocate if not found */
208
        p = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
209
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
210
        *lp = p;
211
    }
212
    return p + (index & (L2_SIZE - 1));
213
}
214

    
215
static inline PhysPageDesc *phys_page_find(unsigned int index)
216
{
217
    PhysPageDesc *p;
218

    
219
    p = l1_phys_map[index >> L2_BITS];
220
    if (!p)
221
        return 0;
222
    return p + (index & (L2_SIZE - 1));
223
}
224

    
225
#if !defined(CONFIG_USER_ONLY)
226
static void tlb_protect_code(CPUState *env, target_ulong addr);
227
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);
228

    
229
static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
230
{
231
    VirtPageDesc **lp, *p;
232

    
233
    lp = &l1_virt_map[index >> L2_BITS];
234
    p = *lp;
235
    if (!p) {
236
        /* allocate if not found */
237
        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
238
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
239
        *lp = p;
240
    }
241
    return p + (index & (L2_SIZE - 1));
242
}
243

    
244
static inline VirtPageDesc *virt_page_find(unsigned int index)
245
{
246
    VirtPageDesc *p;
247

    
248
    p = l1_virt_map[index >> L2_BITS];
249
    if (!p)
250
        return 0;
251
    return p + (index & (L2_SIZE - 1));
252
}
253

    
254
static void virt_page_flush(void)
255
{
256
    int i, j;
257
    VirtPageDesc *p;
258
    
259
    virt_valid_tag++;
260

    
261
    if (virt_valid_tag == 0) {
262
        virt_valid_tag = 1;
263
        for(i = 0; i < L1_SIZE; i++) {
264
            p = l1_virt_map[i];
265
            if (p) {
266
                for(j = 0; j < L2_SIZE; j++)
267
                    p[j].valid_tag = 0;
268
            }
269
        }
270
    }
271
}
272
#else
273
static void virt_page_flush(void)
274
{
275
}
276
#endif
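/* Editor's note (not part of the original file): in system (softmmu) builds
   virt_valid_tag acts as a generation counter.  Bumping it invalidates every
   VirtPageDesc in O(1); the per-entry tags are only rewritten when the
   counter wraps to 0, as handled above.  A sketch of the validity test used
   elsewhere in this file; the example_* name is made up. */
#if 0
static inline int example_virt_page_is_live(const VirtPageDesc *vp)
{
    return vp->valid_tag == virt_valid_tag;   /* stale entries simply mismatch */
}
#endif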
277

    
278
void cpu_exec_init(void)
279
{
280
    if (!code_gen_ptr) {
281
        code_gen_ptr = code_gen_buffer;
282
        page_init();
283
        io_mem_init();
284
    }
285
}
286

    
287
static inline void invalidate_page_bitmap(PageDesc *p)
288
{
289
    if (p->code_bitmap) {
290
        qemu_free(p->code_bitmap);
291
        p->code_bitmap = NULL;
292
    }
293
    p->code_write_count = 0;
294
}
295

    
296
/* set to NULL all the 'first_tb' fields in all PageDescs */
297
static void page_flush_tb(void)
298
{
299
    int i, j;
300
    PageDesc *p;
301

    
302
    for(i = 0; i < L1_SIZE; i++) {
303
        p = l1_map[i];
304
        if (p) {
305
            for(j = 0; j < L2_SIZE; j++) {
306
                p->first_tb = NULL;
307
                invalidate_page_bitmap(p);
308
                p++;
309
            }
310
        }
311
    }
312
}
313

    
314
/* flush all the translation blocks */
315
/* XXX: tb_flush is currently not thread safe */
316
void tb_flush(CPUState *env)
317
{
318
#if defined(DEBUG_FLUSH)
319
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n", 
320
           code_gen_ptr - code_gen_buffer, 
321
           nb_tbs, 
322
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
323
#endif
324
    nb_tbs = 0;
325
    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
326
    virt_page_flush();
327

    
328
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
329
    page_flush_tb();
330

    
331
    code_gen_ptr = code_gen_buffer;
332
    /* XXX: flush processor icache at this point if cache flush is
333
       expensive */
334
}
335

    
336
#ifdef DEBUG_TB_CHECK
337

    
338
static void tb_invalidate_check(unsigned long address)
339
{
340
    TranslationBlock *tb;
341
    int i;
342
    address &= TARGET_PAGE_MASK;
343
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
344
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
345
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
346
                  address >= tb->pc + tb->size)) {
347
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
348
                       address, tb->pc, tb->size);
349
            }
350
        }
351
    }
352
}
353

    
354
/* verify that all the pages have correct rights for code */
355
static void tb_page_check(void)
356
{
357
    TranslationBlock *tb;
358
    int i, flags1, flags2;
359
    
360
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
361
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
362
            flags1 = page_get_flags(tb->pc);
363
            flags2 = page_get_flags(tb->pc + tb->size - 1);
364
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
365
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
366
                       tb->pc, tb->size, flags1, flags2);
367
            }
368
        }
369
    }
370
}
371

    
372
void tb_jmp_check(TranslationBlock *tb)
373
{
374
    TranslationBlock *tb1;
375
    unsigned int n1;
376

    
377
    /* suppress any remaining jumps to this TB */
378
    tb1 = tb->jmp_first;
379
    for(;;) {
380
        n1 = (long)tb1 & 3;
381
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
382
        if (n1 == 2)
383
            break;
384
        tb1 = tb1->jmp_next[n1];
385
    }
386
    /* check end of list */
387
    if (tb1 != tb) {
388
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
389
    }
390
}
391

    
392
#endif
393

    
394
/* invalidate one TB */
395
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
396
                             int next_offset)
397
{
398
    TranslationBlock *tb1;
399
    for(;;) {
400
        tb1 = *ptb;
401
        if (tb1 == tb) {
402
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
403
            break;
404
        }
405
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
406
    }
407
}
408

    
409
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
410
{
411
    TranslationBlock *tb1;
412
    unsigned int n1;
413

    
414
    for(;;) {
415
        tb1 = *ptb;
416
        n1 = (long)tb1 & 3;
417
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
418
        if (tb1 == tb) {
419
            *ptb = tb1->page_next[n1];
420
            break;
421
        }
422
        ptb = &tb1->page_next[n1];
423
    }
424
}
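/* Editor's note (not part of the original file): the page_next[] and
   jmp_first/jmp_next[] links pack extra information into the two low bits of
   the pointer (TranslationBlock pointers are sufficiently aligned): 0 or 1
   selects which of the TB's two pages / jump slots the link refers to, and 2
   marks the end of a circular jump list.  A sketch of the decoding used
   throughout this file; the example_* names are made up. */
#if 0
static inline TranslationBlock *example_tb_untag(TranslationBlock *p)
{
    return (TranslationBlock *)((long)p & ~3);   /* strip the tag bits */
}

static inline int example_tb_tag(TranslationBlock *p)
{
    return (long)p & 3;                          /* 0, 1, or 2 (list end) */
}
#endif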
425

    
426
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
427
{
428
    TranslationBlock *tb1, **ptb;
429
    unsigned int n1;
430

    
431
    ptb = &tb->jmp_next[n];
432
    tb1 = *ptb;
433
    if (tb1) {
434
        /* find tb(n) in circular list */
435
        for(;;) {
436
            tb1 = *ptb;
437
            n1 = (long)tb1 & 3;
438
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
439
            if (n1 == n && tb1 == tb)
440
                break;
441
            if (n1 == 2) {
442
                ptb = &tb1->jmp_first;
443
            } else {
444
                ptb = &tb1->jmp_next[n1];
445
            }
446
        }
447
        /* now we can suppress tb(n) from the list */
448
        *ptb = tb->jmp_next[n];
449

    
450
        tb->jmp_next[n] = NULL;
451
    }
452
}
453

    
454
/* reset the jump entry 'n' of a TB so that it is not chained to
455
   another TB */
456
static inline void tb_reset_jump(TranslationBlock *tb, int n)
457
{
458
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
459
}
460

    
461
static inline void tb_invalidate(TranslationBlock *tb)
462
{
463
    unsigned int h, n1;
464
    TranslationBlock *tb1, *tb2, **ptb;
465
    
466
    tb_invalidated_flag = 1;
467

    
468
    /* remove the TB from the hash list */
469
    h = tb_hash_func(tb->pc);
470
    ptb = &tb_hash[h];
471
    for(;;) {
472
        tb1 = *ptb;
473
        /* NOTE: the TB is not necessarily linked in the hash. It
474
           indicates that it is not currently used */
475
        if (tb1 == NULL)
476
            return;
477
        if (tb1 == tb) {
478
            *ptb = tb1->hash_next;
479
            break;
480
        }
481
        ptb = &tb1->hash_next;
482
    }
483

    
484
    /* suppress this TB from the two jump lists */
485
    tb_jmp_remove(tb, 0);
486
    tb_jmp_remove(tb, 1);
487

    
488
    /* suppress any remaining jumps to this TB */
489
    tb1 = tb->jmp_first;
490
    for(;;) {
491
        n1 = (long)tb1 & 3;
492
        if (n1 == 2)
493
            break;
494
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
495
        tb2 = tb1->jmp_next[n1];
496
        tb_reset_jump(tb1, n1);
497
        tb1->jmp_next[n1] = NULL;
498
        tb1 = tb2;
499
    }
500
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
501
}
502

    
503
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
504
{
505
    PageDesc *p;
506
    unsigned int h;
507
    target_ulong phys_pc;
508
    
509
    /* remove the TB from the hash list */
510
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
511
    h = tb_phys_hash_func(phys_pc);
512
    tb_remove(&tb_phys_hash[h], tb, 
513
              offsetof(TranslationBlock, phys_hash_next));
514

    
515
    /* remove the TB from the page list */
516
    if (tb->page_addr[0] != page_addr) {
517
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
518
        tb_page_remove(&p->first_tb, tb);
519
        invalidate_page_bitmap(p);
520
    }
521
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
522
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
523
        tb_page_remove(&p->first_tb, tb);
524
        invalidate_page_bitmap(p);
525
    }
526

    
527
    tb_invalidate(tb);
528
}
529

    
530
static inline void set_bits(uint8_t *tab, int start, int len)
531
{
532
    int end, mask, end1;
533

    
534
    end = start + len;
535
    tab += start >> 3;
536
    mask = 0xff << (start & 7);
537
    if ((start & ~7) == (end & ~7)) {
538
        if (start < end) {
539
            mask &= ~(0xff << (end & 7));
540
            *tab |= mask;
541
        }
542
    } else {
543
        *tab++ |= mask;
544
        start = (start + 8) & ~7;
545
        end1 = end & ~7;
546
        while (start < end1) {
547
            *tab++ = 0xff;
548
            start += 8;
549
        }
550
        if (start < end) {
551
            mask = ~(0xff << (end & 7));
552
            *tab |= mask;
553
        }
554
    }
555
}
556

    
557
static void build_page_bitmap(PageDesc *p)
558
{
559
    int n, tb_start, tb_end;
560
    TranslationBlock *tb;
561
    
562
    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
563
    if (!p->code_bitmap)
564
        return;
565
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
566

    
567
    tb = p->first_tb;
568
    while (tb != NULL) {
569
        n = (long)tb & 3;
570
        tb = (TranslationBlock *)((long)tb & ~3);
571
        /* NOTE: this is subtle as a TB may span two physical pages */
572
        if (n == 0) {
573
            /* NOTE: tb_end may be after the end of the page, but
574
               it is not a problem */
575
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
576
            tb_end = tb_start + tb->size;
577
            if (tb_end > TARGET_PAGE_SIZE)
578
                tb_end = TARGET_PAGE_SIZE;
579
        } else {
580
            tb_start = 0;
581
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
582
        }
583
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
584
        tb = tb->page_next[n];
585
    }
586
}
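/* Editor's sketch (not part of the original file): how the bitmap built
   above is consumed by tb_invalidate_phys_page_fast() below -- one bit per
   byte of the page, so a small write only triggers an invalidation when it
   overlaps translated code.  The example_* name is made up. */
#if 0
static inline int example_write_hits_code(const PageDesc *p, int offset, int len)
{
    int b = p->code_bitmap[offset >> 3] >> (offset & 7);
    return (b & ((1 << len) - 1)) != 0;   /* same test as the fast path below */
}
#endif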
587

    
588
#ifdef TARGET_HAS_PRECISE_SMC
589

    
590
static void tb_gen_code(CPUState *env, 
591
                        target_ulong pc, target_ulong cs_base, int flags,
592
                        int cflags)
593
{
594
    TranslationBlock *tb;
595
    uint8_t *tc_ptr;
596
    target_ulong phys_pc, phys_page2, virt_page2;
597
    int code_gen_size;
598

    
599
    phys_pc = get_phys_addr_code(env, (unsigned long)pc);
600
    tb = tb_alloc((unsigned long)pc);
601
    if (!tb) {
602
        /* flush must be done */
603
        tb_flush(env);
604
        /* cannot fail at this point */
605
        tb = tb_alloc((unsigned long)pc);
606
    }
607
    tc_ptr = code_gen_ptr;
608
    tb->tc_ptr = tc_ptr;
609
    tb->cs_base = cs_base;
610
    tb->flags = flags;
611
    tb->cflags = cflags;
612
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
613
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
614
    
615
    /* check next page if needed */
616
    virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
617
    phys_page2 = -1;
618
    if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
619
        phys_page2 = get_phys_addr_code(env, virt_page2);
620
    }
621
    tb_link_phys(tb, phys_pc, phys_page2);
622
}
623
#endif
624
    
625
/* invalidate all TBs which intersect with the target physical page
626
   starting in range [start;end[. NOTE: start and end must refer to
627
   the same physical page. 'is_cpu_write_access' should be true if called
628
   from a real cpu write access: the virtual CPU will exit the current
629
   TB if code is modified inside this TB. */
630
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end, 
631
                                   int is_cpu_write_access)
632
{
633
    int n, current_tb_modified, current_tb_not_found, current_flags;
634
    CPUState *env = cpu_single_env;
635
    PageDesc *p;
636
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
637
    target_ulong tb_start, tb_end;
638
    target_ulong current_pc, current_cs_base;
639

    
640
    p = page_find(start >> TARGET_PAGE_BITS);
641
    if (!p) 
642
        return;
643
    if (!p->code_bitmap && 
644
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
645
        is_cpu_write_access) {
646
        /* build code bitmap */
647
        build_page_bitmap(p);
648
    }
649

    
650
    /* we remove all the TBs in the range [start, end[ */
651
    /* XXX: see if in some cases it could be faster to invalidate all the code */
652
    current_tb_not_found = is_cpu_write_access;
653
    current_tb_modified = 0;
654
    current_tb = NULL; /* avoid warning */
655
    current_pc = 0; /* avoid warning */
656
    current_cs_base = 0; /* avoid warning */
657
    current_flags = 0; /* avoid warning */
658
    tb = p->first_tb;
659
    while (tb != NULL) {
660
        n = (long)tb & 3;
661
        tb = (TranslationBlock *)((long)tb & ~3);
662
        tb_next = tb->page_next[n];
663
        /* NOTE: this is subtle as a TB may span two physical pages */
664
        if (n == 0) {
665
            /* NOTE: tb_end may be after the end of the page, but
666
               it is not a problem */
667
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
668
            tb_end = tb_start + tb->size;
669
        } else {
670
            tb_start = tb->page_addr[1];
671
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
672
        }
673
        if (!(tb_end <= start || tb_start >= end)) {
674
#ifdef TARGET_HAS_PRECISE_SMC
675
            if (current_tb_not_found) {
676
                current_tb_not_found = 0;
677
                current_tb = NULL;
678
                if (env->mem_write_pc) {
679
                    /* now we have a real cpu fault */
680
                    current_tb = tb_find_pc(env->mem_write_pc);
681
                }
682
            }
683
            if (current_tb == tb &&
684
                !(current_tb->cflags & CF_SINGLE_INSN)) {
685
                /* If we are modifying the current TB, we must stop
686
                its execution. We could be more precise by checking
687
                that the modification is after the current PC, but it
688
                would require a specialized function to partially
689
                restore the CPU state */
690
                
691
                current_tb_modified = 1;
692
                cpu_restore_state(current_tb, env, 
693
                                  env->mem_write_pc, NULL);
694
#if defined(TARGET_I386)
695
                current_flags = env->hflags;
696
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
697
                current_cs_base = (target_ulong)env->segs[R_CS].base;
698
                current_pc = current_cs_base + env->eip;
699
#else
700
#error unsupported CPU
701
#endif
702
            }
703
#endif /* TARGET_HAS_PRECISE_SMC */
704
            saved_tb = env->current_tb;
705
            env->current_tb = NULL;
706
            tb_phys_invalidate(tb, -1);
707
            env->current_tb = saved_tb;
708
            if (env->interrupt_request && env->current_tb)
709
                cpu_interrupt(env, env->interrupt_request);
710
        }
711
        tb = tb_next;
712
    }
713
#if !defined(CONFIG_USER_ONLY)
714
    /* if no code remaining, no need to continue to use slow writes */
715
    if (!p->first_tb) {
716
        invalidate_page_bitmap(p);
717
        if (is_cpu_write_access) {
718
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
719
        }
720
    }
721
#endif
722
#ifdef TARGET_HAS_PRECISE_SMC
723
    if (current_tb_modified) {
724
        /* we generate a block containing just the instruction
725
           modifying the memory. It will ensure that it cannot modify
726
           itself */
727
        env->current_tb = NULL;
728
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 
729
                    CF_SINGLE_INSN);
730
        cpu_resume_from_signal(env, NULL);
731
    }
732
#endif
733
}
734

    
735
/* len must be <= 8 and start must be a multiple of len */
736
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
737
{
738
    PageDesc *p;
739
    int offset, b;
740
#if 0
741
    if (1) {
742
        if (loglevel) {
743
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n", 
744
                   cpu_single_env->mem_write_vaddr, len, 
745
                   cpu_single_env->eip, 
746
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
747
        }
748
    }
749
#endif
750
    p = page_find(start >> TARGET_PAGE_BITS);
751
    if (!p) 
752
        return;
753
    if (p->code_bitmap) {
754
        offset = start & ~TARGET_PAGE_MASK;
755
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
756
        if (b & ((1 << len) - 1))
757
            goto do_invalidate;
758
    } else {
759
    do_invalidate:
760
        tb_invalidate_phys_page_range(start, start + len, 1);
761
    }
762
}
763

    
764
#if !defined(CONFIG_SOFTMMU)
765
static void tb_invalidate_phys_page(target_ulong addr, 
766
                                    unsigned long pc, void *puc)
767
{
768
    int n, current_flags, current_tb_modified;
769
    target_ulong current_pc, current_cs_base;
770
    PageDesc *p;
771
    TranslationBlock *tb, *current_tb;
772
#ifdef TARGET_HAS_PRECISE_SMC
773
    CPUState *env = cpu_single_env;
774
#endif
775

    
776
    addr &= TARGET_PAGE_MASK;
777
    p = page_find(addr >> TARGET_PAGE_BITS);
778
    if (!p) 
779
        return;
780
    tb = p->first_tb;
781
    current_tb_modified = 0;
782
    current_tb = NULL;
783
    current_pc = 0; /* avoid warning */
784
    current_cs_base = 0; /* avoid warning */
785
    current_flags = 0; /* avoid warning */
786
#ifdef TARGET_HAS_PRECISE_SMC
787
    if (tb && pc != 0) {
788
        current_tb = tb_find_pc(pc);
789
    }
790
#endif
791
    while (tb != NULL) {
792
        n = (long)tb & 3;
793
        tb = (TranslationBlock *)((long)tb & ~3);
794
#ifdef TARGET_HAS_PRECISE_SMC
795
        if (current_tb == tb &&
796
            !(current_tb->cflags & CF_SINGLE_INSN)) {
797
                /* If we are modifying the current TB, we must stop
798
                   its execution. We could be more precise by checking
799
                   that the modification is after the current PC, but it
800
                   would require a specialized function to partially
801
                   restore the CPU state */
802
            
803
            current_tb_modified = 1;
804
            cpu_restore_state(current_tb, env, pc, puc);
805
#if defined(TARGET_I386)
806
            current_flags = env->hflags;
807
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
808
            current_cs_base = (target_ulong)env->segs[R_CS].base;
809
            current_pc = current_cs_base + env->eip;
810
#else
811
#error unsupported CPU
812
#endif
813
        }
814
#endif /* TARGET_HAS_PRECISE_SMC */
815
        tb_phys_invalidate(tb, addr);
816
        tb = tb->page_next[n];
817
    }
818
    p->first_tb = NULL;
819
#ifdef TARGET_HAS_PRECISE_SMC
820
    if (current_tb_modified) {
821
        /* we generate a block containing just the instruction
822
           modifying the memory. It will ensure that it cannot modify
823
           itself */
824
        env->current_tb = NULL;
825
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 
826
                    CF_SINGLE_INSN);
827
        cpu_resume_from_signal(env, puc);
828
    }
829
#endif
830
}
831
#endif
832

    
833
/* add the tb in the target page and protect it if necessary */
834
static inline void tb_alloc_page(TranslationBlock *tb, 
835
                                 unsigned int n, unsigned int page_addr)
836
{
837
    PageDesc *p;
838
    TranslationBlock *last_first_tb;
839

    
840
    tb->page_addr[n] = page_addr;
841
    p = page_find(page_addr >> TARGET_PAGE_BITS);
842
    tb->page_next[n] = p->first_tb;
843
    last_first_tb = p->first_tb;
844
    p->first_tb = (TranslationBlock *)((long)tb | n);
845
    invalidate_page_bitmap(p);
846

    
847
#if defined(TARGET_HAS_SMC) || 1
848

    
849
#if defined(CONFIG_USER_ONLY)
850
    if (p->flags & PAGE_WRITE) {
851
        unsigned long host_start, host_end, addr;
852
        int prot;
853

    
854
        /* force the host page as non writable (writes will have a
855
           page fault + mprotect overhead) */
856
        host_start = page_addr & qemu_host_page_mask;
857
        host_end = host_start + qemu_host_page_size;
858
        prot = 0;
859
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
860
            prot |= page_get_flags(addr);
861
        mprotect((void *)host_start, qemu_host_page_size, 
862
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
863
#ifdef DEBUG_TB_INVALIDATE
864
        printf("protecting code page: 0x%08lx\n", 
865
               host_start);
866
#endif
867
        p->flags &= ~PAGE_WRITE;
868
    }
869
#else
870
    /* if some code is already present, then the pages are already
871
       protected. So we handle the case where only the first TB is
872
       allocated in a physical page */
873
    if (!last_first_tb) {
874
        target_ulong virt_addr;
875

    
876
        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
877
        tlb_protect_code(cpu_single_env, virt_addr);        
878
    }
879
#endif
880

    
881
#endif /* TARGET_HAS_SMC */
882
}
883

    
884
/* Allocate a new translation block. Flush the translation buffer if
885
   too many translation blocks or too much generated code. */
886
TranslationBlock *tb_alloc(unsigned long pc)
887
{
888
    TranslationBlock *tb;
889

    
890
    if (nb_tbs >= CODE_GEN_MAX_BLOCKS || 
891
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
892
        return NULL;
893
    tb = &tbs[nb_tbs++];
894
    tb->pc = pc;
895
    tb->cflags = 0;
896
    return tb;
897
}
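/* Editor's sketch (not part of the original file): the usual allocation
   pattern (see tb_gen_code() above) -- a NULL return means the TB array or
   the code buffer is full, so flush and retry; the second call cannot fail.
   The example_* name is made up. */
#if 0
static TranslationBlock *example_alloc_tb(CPUState *env, unsigned long pc)
{
    TranslationBlock *tb = tb_alloc(pc);
    if (!tb) {
        tb_flush(env);        /* empties tbs[] and resets code_gen_ptr */
        tb = tb_alloc(pc);    /* now guaranteed to succeed */
    }
    return tb;
}
#endif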
898

    
899
/* add a new TB and link it to the physical page tables. phys_page2 is
900
   (-1) to indicate that only one page contains the TB. */
901
void tb_link_phys(TranslationBlock *tb, 
902
                  target_ulong phys_pc, target_ulong phys_page2)
903
{
904
    unsigned int h;
905
    TranslationBlock **ptb;
906

    
907
    /* add in the physical hash table */
908
    h = tb_phys_hash_func(phys_pc);
909
    ptb = &tb_phys_hash[h];
910
    tb->phys_hash_next = *ptb;
911
    *ptb = tb;
912

    
913
    /* add in the page list */
914
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
915
    if (phys_page2 != -1)
916
        tb_alloc_page(tb, 1, phys_page2);
917
    else
918
        tb->page_addr[1] = -1;
919
#ifdef DEBUG_TB_CHECK
920
    tb_page_check();
921
#endif
922
}
923

    
924
/* link the tb with the other TBs */
925
void tb_link(TranslationBlock *tb)
926
{
927
#if !defined(CONFIG_USER_ONLY)
928
    {
929
        VirtPageDesc *vp;
930
        target_ulong addr;
931
        
932
        /* save the code memory mappings (needed to invalidate the code) */
933
        addr = tb->pc & TARGET_PAGE_MASK;
934
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
935
#ifdef DEBUG_TLB_CHECK 
936
        if (vp->valid_tag == virt_valid_tag &&
937
            vp->phys_addr != tb->page_addr[0]) {
938
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
939
                   addr, tb->page_addr[0], vp->phys_addr);
940
        }
941
#endif
942
        vp->phys_addr = tb->page_addr[0];
943
        if (vp->valid_tag != virt_valid_tag) {
944
            vp->valid_tag = virt_valid_tag;
945
#if !defined(CONFIG_SOFTMMU)
946
            vp->prot = 0;
947
#endif
948
        }
949
        
950
        if (tb->page_addr[1] != -1) {
951
            addr += TARGET_PAGE_SIZE;
952
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
953
#ifdef DEBUG_TLB_CHECK 
954
            if (vp->valid_tag == virt_valid_tag &&
955
                vp->phys_addr != tb->page_addr[1]) { 
956
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
957
                       addr, tb->page_addr[1], vp->phys_addr);
958
            }
959
#endif
960
            vp->phys_addr = tb->page_addr[1];
961
            if (vp->valid_tag != virt_valid_tag) {
962
                vp->valid_tag = virt_valid_tag;
963
#if !defined(CONFIG_SOFTMMU)
964
                vp->prot = 0;
965
#endif
966
            }
967
        }
968
    }
969
#endif
970

    
971
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
972
    tb->jmp_next[0] = NULL;
973
    tb->jmp_next[1] = NULL;
974
#ifdef USE_CODE_COPY
975
    tb->cflags &= ~CF_FP_USED;
976
    if (tb->cflags & CF_TB_FP_USED)
977
        tb->cflags |= CF_FP_USED;
978
#endif
979

    
980
    /* init original jump addresses */
981
    if (tb->tb_next_offset[0] != 0xffff)
982
        tb_reset_jump(tb, 0);
983
    if (tb->tb_next_offset[1] != 0xffff)
984
        tb_reset_jump(tb, 1);
985
}
986

    
987
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
988
   tb[1].tc_ptr. Return NULL if not found */
989
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
990
{
991
    int m_min, m_max, m;
992
    unsigned long v;
993
    TranslationBlock *tb;
994

    
995
    if (nb_tbs <= 0)
996
        return NULL;
997
    if (tc_ptr < (unsigned long)code_gen_buffer ||
998
        tc_ptr >= (unsigned long)code_gen_ptr)
999
        return NULL;
1000
    /* binary search (cf Knuth) */
1001
    m_min = 0;
1002
    m_max = nb_tbs - 1;
1003
    while (m_min <= m_max) {
1004
        m = (m_min + m_max) >> 1;
1005
        tb = &tbs[m];
1006
        v = (unsigned long)tb->tc_ptr;
1007
        if (v == tc_ptr)
1008
            return tb;
1009
        else if (tc_ptr < v) {
1010
            m_max = m - 1;
1011
        } else {
1012
            m_min = m + 1;
1013
        }
1014
    } 
1015
    return &tbs[m_max];
1016
}
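/* Editor's note (not part of the original file): the binary search above is
   valid because tbs[] is filled in increasing tc_ptr order between flushes
   (tb_alloc() hands out blocks as code_gen_ptr advances).  A typical caller,
   sketched with a made-up example_* name: */
#if 0
static void example_restore_from_host_pc(CPUState *env, unsigned long host_pc,
                                         void *puc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);  /* NULL if pc is outside the buffer */
    if (tb)
        cpu_restore_state(tb, env, host_pc, puc);
}
#endif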
1017

    
1018
static void tb_reset_jump_recursive(TranslationBlock *tb);
1019

    
1020
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1021
{
1022
    TranslationBlock *tb1, *tb_next, **ptb;
1023
    unsigned int n1;
1024

    
1025
    tb1 = tb->jmp_next[n];
1026
    if (tb1 != NULL) {
1027
        /* find head of list */
1028
        for(;;) {
1029
            n1 = (long)tb1 & 3;
1030
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1031
            if (n1 == 2)
1032
                break;
1033
            tb1 = tb1->jmp_next[n1];
1034
        }
1035
        /* we are now sure that tb jumps to tb1 */
1036
        tb_next = tb1;
1037

    
1038
        /* remove tb from the jmp_first list */
1039
        ptb = &tb_next->jmp_first;
1040
        for(;;) {
1041
            tb1 = *ptb;
1042
            n1 = (long)tb1 & 3;
1043
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1044
            if (n1 == n && tb1 == tb)
1045
                break;
1046
            ptb = &tb1->jmp_next[n1];
1047
        }
1048
        *ptb = tb->jmp_next[n];
1049
        tb->jmp_next[n] = NULL;
1050
        
1051
        /* suppress the jump to next tb in generated code */
1052
        tb_reset_jump(tb, n);
1053

    
1054
        /* suppress jumps in the tb on which we could have jumped */
1055
        tb_reset_jump_recursive(tb_next);
1056
    }
1057
}
1058

    
1059
static void tb_reset_jump_recursive(TranslationBlock *tb)
1060
{
1061
    tb_reset_jump_recursive2(tb, 0);
1062
    tb_reset_jump_recursive2(tb, 1);
1063
}
1064

    
1065
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1066
{
1067
    target_ulong phys_addr;
1068

    
1069
    phys_addr = cpu_get_phys_page_debug(env, pc);
1070
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
1071
}
1072

    
1073
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1074
   breakpoint is reached */
1075
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1076
{
1077
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
1078
    int i;
1079
    
1080
    for(i = 0; i < env->nb_breakpoints; i++) {
1081
        if (env->breakpoints[i] == pc)
1082
            return 0;
1083
    }
1084

    
1085
    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1086
        return -1;
1087
    env->breakpoints[env->nb_breakpoints++] = pc;
1088
    
1089
    breakpoint_invalidate(env, pc);
1090
    return 0;
1091
#else
1092
    return -1;
1093
#endif
1094
}
1095

    
1096
/* remove a breakpoint */
1097
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1098
{
1099
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
1100
    int i;
1101
    for(i = 0; i < env->nb_breakpoints; i++) {
1102
        if (env->breakpoints[i] == pc)
1103
            goto found;
1104
    }
1105
    return -1;
1106
 found:
1107
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
1108
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
1109
    env->nb_breakpoints--;
1110

    
1111
    breakpoint_invalidate(env, pc);
1112
    return 0;
1113
#else
1114
    return -1;
1115
#endif
1116
}
1117

    
1118
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1119
   CPU loop after each instruction */
1120
void cpu_single_step(CPUState *env, int enabled)
1121
{
1122
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
1123
    if (env->singlestep_enabled != enabled) {
1124
        env->singlestep_enabled = enabled;
1125
        /* must flush all the translated code to avoid inconsistencies */
1126
        /* XXX: only flush what is necessary */
1127
        tb_flush(env);
1128
    }
1129
#endif
1130
}
1131

    
1132
/* enable or disable low level logging */
1133
void cpu_set_log(int log_flags)
1134
{
1135
    loglevel = log_flags;
1136
    if (loglevel && !logfile) {
1137
        logfile = fopen(logfilename, "w");
1138
        if (!logfile) {
1139
            perror(logfilename);
1140
            _exit(1);
1141
        }
1142
#if !defined(CONFIG_SOFTMMU)
1143
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1144
        {
1145
            static uint8_t logfile_buf[4096];
1146
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1147
        }
1148
#else
1149
        setvbuf(logfile, NULL, _IOLBF, 0);
1150
#endif
1151
    }
1152
}
1153

    
1154
void cpu_set_log_filename(const char *filename)
1155
{
1156
    logfilename = strdup(filename);
1157
}
1158

    
1159
/* mask must never be zero, except for A20 change call */
1160
void cpu_interrupt(CPUState *env, int mask)
1161
{
1162
    TranslationBlock *tb;
1163
    static int interrupt_lock;
1164

    
1165
    env->interrupt_request |= mask;
1166
    /* if the cpu is currently executing code, we must unlink it and
1167
       all the potentially executing TB */
1168
    tb = env->current_tb;
1169
    if (tb && !testandset(&interrupt_lock)) {
1170
        env->current_tb = NULL;
1171
        tb_reset_jump_recursive(tb);
1172
        interrupt_lock = 0;
1173
    }
1174
}
1175

    
1176
void cpu_reset_interrupt(CPUState *env, int mask)
1177
{
1178
    env->interrupt_request &= ~mask;
1179
}
1180

    
1181
CPULogItem cpu_log_items[] = {
1182
    { CPU_LOG_TB_OUT_ASM, "out_asm", 
1183
      "show generated host assembly code for each compiled TB" },
1184
    { CPU_LOG_TB_IN_ASM, "in_asm",
1185
      "show target assembly code for each compiled TB" },
1186
    { CPU_LOG_TB_OP, "op", 
1187
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1188
#ifdef TARGET_I386
1189
    { CPU_LOG_TB_OP_OPT, "op_opt",
1190
      "show micro ops after optimization for each compiled TB" },
1191
#endif
1192
    { CPU_LOG_INT, "int",
1193
      "show interrupts/exceptions in short format" },
1194
    { CPU_LOG_EXEC, "exec",
1195
      "show trace before each executed TB (lots of logs)" },
1196
    { CPU_LOG_TB_CPU, "cpu",
1197
      "show CPU state before bloc translation" },
1198
#ifdef TARGET_I386
1199
    { CPU_LOG_PCALL, "pcall",
1200
      "show protected mode far calls/returns/exceptions" },
1201
#endif
1202
#ifdef DEBUG_IOPORT
1203
    { CPU_LOG_IOPORT, "ioport",
1204
      "show all i/o ports accesses" },
1205
#endif
1206
    { 0, NULL, NULL },
1207
};
1208

    
1209
static int cmp1(const char *s1, int n, const char *s2)
1210
{
1211
    if (strlen(s2) != n)
1212
        return 0;
1213
    return memcmp(s1, s2, n) == 0;
1214
}
1215
      
1216
/* takes a comma separated list of log masks. Return 0 if error. */
1217
int cpu_str_to_log_mask(const char *str)
1218
{
1219
    CPULogItem *item;
1220
    int mask;
1221
    const char *p, *p1;
1222

    
1223
    p = str;
1224
    mask = 0;
1225
    for(;;) {
1226
        p1 = strchr(p, ',');
1227
        if (!p1)
1228
            p1 = p + strlen(p);
1229
        if(cmp1(p,p1-p,"all")) {
1230
                for(item = cpu_log_items; item->mask != 0; item++) {
1231
                        mask |= item->mask;
1232
                }
1233
        } else {
1234
        for(item = cpu_log_items; item->mask != 0; item++) {
1235
            if (cmp1(p, p1 - p, item->name))
1236
                goto found;
1237
        }
1238
        return 0;
1239
        }
1240
    found:
1241
        mask |= item->mask;
1242
        if (*p1 != ',')
1243
            break;
1244
        p = p1 + 1;
1245
    }
1246
    return mask;
1247
}
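/* Editor's sketch (not part of the original file): typical use from a
   command line front end -- the item names come from cpu_log_items[] above
   and "all" selects every mask.  The example_* name is made up. */
#if 0
static void example_enable_logging(const char *arg)
{
    int mask = cpu_str_to_log_mask(arg);   /* e.g. "in_asm,op,cpu" */
    if (!mask)
        fprintf(stderr, "unknown log item in '%s'\n", arg);
    else
        cpu_set_log(mask);
}
#endif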
1248

    
1249
void cpu_abort(CPUState *env, const char *fmt, ...)
1250
{
1251
    va_list ap;
1252

    
1253
    va_start(ap, fmt);
1254
    fprintf(stderr, "qemu: fatal: ");
1255
    vfprintf(stderr, fmt, ap);
1256
    fprintf(stderr, "\n");
1257
#ifdef TARGET_I386
1258
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1259
#else
1260
    cpu_dump_state(env, stderr, fprintf, 0);
1261
#endif
1262
    va_end(ap);
1263
    abort();
1264
}
1265

    
1266
#if !defined(CONFIG_USER_ONLY)
1267

    
1268
/* NOTE: if flush_global is true, also flush global entries (not
1269
   implemented yet) */
1270
void tlb_flush(CPUState *env, int flush_global)
1271
{
1272
    int i;
1273

    
1274
#if defined(DEBUG_TLB)
1275
    printf("tlb_flush:\n");
1276
#endif
1277
    /* must reset current TB so that interrupts cannot modify the
1278
       links while we are modifying them */
1279
    env->current_tb = NULL;
1280

    
1281
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1282
        env->tlb_read[0][i].address = -1;
1283
        env->tlb_write[0][i].address = -1;
1284
        env->tlb_read[1][i].address = -1;
1285
        env->tlb_write[1][i].address = -1;
1286
    }
1287

    
1288
    virt_page_flush();
1289
    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
1290

    
1291
#if !defined(CONFIG_SOFTMMU)
1292
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1293
#endif
1294
}
1295

    
1296
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1297
{
1298
    if (addr == (tlb_entry->address & 
1299
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
1300
        tlb_entry->address = -1;
1301
}
1302

    
1303
void tlb_flush_page(CPUState *env, target_ulong addr)
1304
{
1305
    int i, n;
1306
    VirtPageDesc *vp;
1307
    PageDesc *p;
1308
    TranslationBlock *tb;
1309

    
1310
#if defined(DEBUG_TLB)
1311
    printf("tlb_flush_page: 0x%08x\n", addr);
1312
#endif
1313
    /* must reset current TB so that interrupts cannot modify the
1314
       links while we are modifying them */
1315
    env->current_tb = NULL;
1316

    
1317
    addr &= TARGET_PAGE_MASK;
1318
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1319
    tlb_flush_entry(&env->tlb_read[0][i], addr);
1320
    tlb_flush_entry(&env->tlb_write[0][i], addr);
1321
    tlb_flush_entry(&env->tlb_read[1][i], addr);
1322
    tlb_flush_entry(&env->tlb_write[1][i], addr);
1323

    
1324
    /* remove from the virtual pc hash table all the TB at this
1325
       virtual address */
1326
    
1327
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1328
    if (vp && vp->valid_tag == virt_valid_tag) {
1329
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
1330
        if (p) {
1331
            /* we remove all the links to the TBs in this virtual page */
1332
            tb = p->first_tb;
1333
            while (tb != NULL) {
1334
                n = (long)tb & 3;
1335
                tb = (TranslationBlock *)((long)tb & ~3);
1336
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
1337
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
1338
                    tb_invalidate(tb);
1339
                }
1340
                tb = tb->page_next[n];
1341
            }
1342
        }
1343
        vp->valid_tag = 0;
1344
    }
1345

    
1346
#if !defined(CONFIG_SOFTMMU)
1347
    if (addr < MMAP_AREA_END)
1348
        munmap((void *)addr, TARGET_PAGE_SIZE);
1349
#endif
1350
}
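/* Editor's note (not part of the original file): the software TLB is direct
   mapped -- a virtual address selects exactly one slot in each of the four
   tables (user/kernel x read/write), which is why flushing a single page
   only needs to clear those four entries, as above.  Sketch of the index
   computation; the example_* name is made up. */
#if 0
static inline int example_tlb_index(target_ulong vaddr)
{
    return (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
}
#endif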
1351

    
1352
static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
1353
{
1354
    if (addr == (tlb_entry->address & 
1355
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
1356
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
1357
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
1358
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
1359
    }
1360
}
1361

    
1362
/* update the TLBs so that writes to code in the virtual page 'addr'
1363
   can be detected */
1364
static void tlb_protect_code(CPUState *env, target_ulong addr)
1365
{
1366
    int i;
1367

    
1368
    addr &= TARGET_PAGE_MASK;
1369
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1370
    tlb_protect_code1(&env->tlb_write[0][i], addr);
1371
    tlb_protect_code1(&env->tlb_write[1][i], addr);
1372
#if !defined(CONFIG_SOFTMMU)
1373
    /* NOTE: as we generated the code for this page, it is already at
1374
       least readable */
1375
    if (addr < MMAP_AREA_END)
1376
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
1377
#endif
1378
}
1379

    
1380
static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry, 
1381
                                       unsigned long phys_addr)
1382
{
1383
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
1384
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
1385
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1386
    }
1387
}
1388

    
1389
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1390
   tested for self modifying code */
1391
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
1392
{
1393
    int i;
1394

    
1395
    phys_addr &= TARGET_PAGE_MASK;
1396
    phys_addr += (long)phys_ram_base;
1397
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1398
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
1399
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
1400
}
1401

    
1402
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, 
1403
                                         unsigned long start, unsigned long length)
1404
{
1405
    unsigned long addr;
1406
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1407
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1408
        if ((addr - start) < length) {
1409
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1410
        }
1411
    }
1412
}
1413

    
1414
void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
1415
{
1416
    CPUState *env;
1417
    unsigned long length, start1;
1418
    int i;
1419

    
1420
    start &= TARGET_PAGE_MASK;
1421
    end = TARGET_PAGE_ALIGN(end);
1422

    
1423
    length = end - start;
1424
    if (length == 0)
1425
        return;
1426
    memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);
1427

    
1428
    env = cpu_single_env;
1429
    /* we modify the TLB cache so that the dirty bit will be set again
1430
       when accessing the range */
1431
    start1 = start + (unsigned long)phys_ram_base;
1432
    for(i = 0; i < CPU_TLB_SIZE; i++)
1433
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
1434
    for(i = 0; i < CPU_TLB_SIZE; i++)
1435
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
1436

    
1437
#if !defined(CONFIG_SOFTMMU)
1438
    /* XXX: this is expensive */
1439
    {
1440
        VirtPageDesc *p;
1441
        int j;
1442
        target_ulong addr;
1443

    
1444
        for(i = 0; i < L1_SIZE; i++) {
1445
            p = l1_virt_map[i];
1446
            if (p) {
1447
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
1448
                for(j = 0; j < L2_SIZE; j++) {
1449
                    if (p->valid_tag == virt_valid_tag &&
1450
                        p->phys_addr >= start && p->phys_addr < end &&
1451
                        (p->prot & PROT_WRITE)) {
1452
                        if (addr < MMAP_AREA_END) {
1453
                            mprotect((void *)addr, TARGET_PAGE_SIZE, 
1454
                                     p->prot & ~PROT_WRITE);
1455
                        }
1456
                    }
1457
                    addr += TARGET_PAGE_SIZE;
1458
                    p++;
1459
                }
1460
            }
1461
        }
1462
    }
1463
#endif
1464
}
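/* Editor's sketch (not part of the original file): phys_ram_dirty keeps one
   byte per target page.  After the reset above, the first write to such a
   page goes through the IO_MEM_NOTDIRTY slow path, which marks it dirty
   again (see tlb_set_dirty() below).  The example_* name is made up. */
#if 0
static inline int example_phys_page_is_dirty(target_ulong phys_addr)
{
    return phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] != 0;
}
#endif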
1465

    
1466
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, 
1467
                                    unsigned long start)
1468
{
1469
    unsigned long addr;
1470
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1471
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1472
        if (addr == start) {
1473
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
1474
        }
1475
    }
1476
}
1477

    
1478
/* update the TLB corresponding to virtual page vaddr and phys addr
1479
   addr so that it is no longer dirty */
1480
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
1481
{
1482
    CPUState *env = cpu_single_env;
1483
    int i;
1484

    
1485
    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;
1486

    
1487
    addr &= TARGET_PAGE_MASK;
1488
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1489
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
1490
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
1491
}
1492

    
1493
/* add a new TLB entry. At most one entry for a given virtual address
1494
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1495
   (can only happen in non SOFTMMU mode for I/O pages or pages
1496
   conflicting with the host address space). */
1497
int tlb_set_page(CPUState *env, target_ulong vaddr, 
1498
                 target_phys_addr_t paddr, int prot, 
1499
                 int is_user, int is_softmmu)
1500
{
1501
    PhysPageDesc *p;
1502
    unsigned long pd;
1503
    TranslationBlock *first_tb;
1504
    unsigned int index;
1505
    target_ulong address;
1506
    unsigned long addend;
1507
    int ret;
1508

    
1509
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1510
    first_tb = NULL;
1511
    if (!p) {
1512
        pd = IO_MEM_UNASSIGNED;
1513
    } else {
1514
        PageDesc *p1;
1515
        pd = p->phys_offset;
1516
        if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1517
            /* NOTE: we also allocate the page at this stage */
1518
            p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
1519
            first_tb = p1->first_tb;
1520
        }
1521
    }
1522
#if defined(DEBUG_TLB)
1523
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
1524
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
1525
#endif
1526

    
1527
    ret = 0;
1528
#if !defined(CONFIG_SOFTMMU)
1529
    if (is_softmmu) 
1530
#endif
1531
    {
1532
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1533
            /* IO memory case */
1534
            address = vaddr | pd;
1535
            addend = paddr;
1536
        } else {
1537
            /* standard memory */
1538
            address = vaddr;
1539
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1540
        }
1541
        
1542
        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
1543
        addend -= vaddr;
1544
        if (prot & PAGE_READ) {
1545
            env->tlb_read[is_user][index].address = address;
1546
            env->tlb_read[is_user][index].addend = addend;
1547
        } else {
1548
            env->tlb_read[is_user][index].address = -1;
1549
            env->tlb_read[is_user][index].addend = -1;
1550
        }
1551
        if (prot & PAGE_WRITE) {
1552
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
1553
                /* ROM: access is ignored (same as unassigned) */
1554
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
1555
                env->tlb_write[is_user][index].addend = addend;
1556
            } else 
1557
                /* XXX: the PowerPC code seems not ready to handle
1558
                   self modifying code with DCBI */
1559
#if defined(TARGET_HAS_SMC) || 1
1560
            if (first_tb) {
1561
                /* if code is present, we use a specific memory
1562
                   handler. It works only for physical memory access */
1563
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
1564
                env->tlb_write[is_user][index].addend = addend;
1565
            } else 
1566
#endif
1567
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && 
1568
                       !cpu_physical_memory_is_dirty(pd)) {
1569
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
1570
                env->tlb_write[is_user][index].addend = addend;
1571
            } else {
1572
                env->tlb_write[is_user][index].address = address;
1573
                env->tlb_write[is_user][index].addend = addend;
1574
            }
1575
        } else {
1576
            env->tlb_write[is_user][index].address = -1;
1577
            env->tlb_write[is_user][index].addend = -1;
1578
        }
1579
    }
1580
#if !defined(CONFIG_SOFTMMU)
1581
    else {
1582
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1583
            /* IO access: no mapping is done as it will be handled by the
1584
               soft MMU */
1585
            if (!(env->hflags & HF_SOFTMMU_MASK))
1586
                ret = 2;
1587
        } else {
1588
            void *map_addr;
1589

    
1590
            if (vaddr >= MMAP_AREA_END) {
1591
                ret = 2;
1592
            } else {
1593
                if (prot & PROT_WRITE) {
1594
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || 
1595
#if defined(TARGET_HAS_SMC) || 1
1596
                        first_tb ||
1597
#endif
1598
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && 
1599
                         !cpu_physical_memory_is_dirty(pd))) {
1600
                        /* ROM: we do as if code was inside */
1601
                        /* if code is present, we only map as read only and save the
1602
                           original mapping */
1603
                        VirtPageDesc *vp;
1604
                        
1605
                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
1606
                        vp->phys_addr = pd;
1607
                        vp->prot = prot;
1608
                        vp->valid_tag = virt_valid_tag;
1609
                        prot &= ~PAGE_WRITE;
1610
                    }
1611
                }
1612
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot, 
1613
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1614
                if (map_addr == MAP_FAILED) {
1615
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
1616
                              paddr, vaddr);
1617
                }
1618
            }
1619
        }
1620
    }
1621
#endif
1622
    return ret;
1623
}
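/* Editor's sketch (not part of the original file): how a softmmu fast path
   would use a RAM entry installed above -- the stored addend was computed as
   (host page address - vaddr), so adding it to a guest virtual address gives
   the host pointer directly.  A real fast path first checks that the entry's
   address field matches vaddr; that check is omitted here.  The example_*
   name is made up. */
#if 0
static inline uint8_t *example_guest_to_host(CPUState *env, target_ulong vaddr,
                                             int is_user)
{
    int index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    return (uint8_t *)(vaddr + env->tlb_read[is_user][index].addend);
}
#endif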
1624

    
1625
/* called from signal handler: invalidate the code and unprotect the
1626
   page. Return TRUE if the fault was successfully handled. */
1627
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
1628
{
1629
#if !defined(CONFIG_SOFTMMU)
1630
    VirtPageDesc *vp;
1631

    
1632
#if defined(DEBUG_TLB)
1633
    printf("page_unprotect: addr=0x%08x\n", addr);
1634
#endif
1635
    addr &= TARGET_PAGE_MASK;
1636

    
1637
    /* if it is not mapped, no need to worry here */
1638
    if (addr >= MMAP_AREA_END)
1639
        return 0;
1640
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1641
    if (!vp)
1642
        return 0;
1643
    /* NOTE: in this case, validate_tag is _not_ tested as it
1644
       validates only the code TLB */
1645
    if (vp->valid_tag != virt_valid_tag)
1646
        return 0;
1647
    if (!(vp->prot & PAGE_WRITE))
1648
        return 0;
1649
#if defined(DEBUG_TLB)
1650
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n", 
1651
           addr, vp->phys_addr, vp->prot);
1652
#endif
1653
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1654
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1655
                  (unsigned long)addr, vp->prot);
1656
    /* set the dirty bit */
1657
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
1658
    /* flush the code inside */
1659
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1660
    return 1;
1661
#else
1662
    return 0;
1663
#endif
1664
}
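
/* Illustrative sketch: a host SIGSEGV handler is expected to try
   page_unprotect() on write faults before treating the fault as a real guest
   problem.  The wrapper below and its arguments are hypothetical; only the
   page_unprotect() interface comes from this file. */
#if 0
static int example_handle_write_fault(unsigned long host_addr,
                                      unsigned long pc, void *uc)
{
    /* if the fault hit a page that was write protected to track self
       modifying code, invalidate the TBs on it and restore write access */
    if (page_unprotect(host_addr, pc, uc))
        return 1; /* fault handled, restart the faulting instruction */
    return 0;     /* not ours: let the normal fault path run */
}
#endif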

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
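
/* Illustrative usage: dump the current guest mappings, e.g. from user-mode
   startup code or a debug hook.  The file name is an arbitrary example. */
#if 0
static void example_dump_mappings(void)
{
    FILE *f = fopen("/tmp/qemu-maps.txt", "w");
    if (f) {
        page_dump(f);
        fclose(f);
    }
}
#endif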

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the page was write protected and becomes writable, then we
           invalidate the code inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
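
/* Illustrative usage: after emulating a target mmap(), the user-mode code
   records the new protection so that page_get_flags()/page_unprotect() see
   it.  The address range and protection bits are example values only. */
#if 0
static void example_register_mapping(unsigned long start, unsigned long len)
{
    page_set_flags(start, start + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE | PAGE_EXEC);
}
#endif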

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}
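
/* Illustrative usage: before letting a host system call write directly into
   guest memory (e.g. when emulating read(2)), the syscall layer should
   unprotect the destination so the host write does not fault on pages that
   were write protected for translated code.  'fd', 'guest_buf' and 'count'
   are example parameters. */
#if 0
static long example_do_read(int fd, uint8_t *guest_buf, unsigned long count)
{
    page_unprotect_range(guest_buf, count);
    return read(fd, guest_buf, count);
}
#endif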

static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    unsigned long addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
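
/* Illustrative usage: a board init function would typically map the first
   'ram_size' bytes of phys_ram_base as RAM at guest physical address 0 and a
   ROM image right after it.  The sizes and guest addresses are example
   values only. */
#if 0
static void example_map_memory(unsigned long ram_size, unsigned long rom_size)
{
    /* RAM: phys_offset is the plain offset into phys_ram_base */
    cpu_register_physical_memory(0x00000000, ram_size, IO_MEM_RAM);
    /* ROM: same backing storage, located after the RAM, but writes are
       ignored */
    cpu_register_physical_memory(0xfffe0000, rom_size, ram_size | IO_MEM_ROM);
}
#endif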

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

/* self modifying code support in soft mmu mode: writing to a page
   containing code comes to these functions */

static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 1);
#endif
    stb_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2);
#endif
    stw_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4);
#endif
    stl_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stb_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stw_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stl_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
}
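
/* Sizing note (worked example, not from the original source): phys_ram_dirty
   holds one byte of dirty state per target page, so with 4 KB pages
   (TARGET_PAGE_BITS = 12) a guest with 32 MB of RAM needs
   phys_ram_size >> TARGET_PAGE_BITS = (32 * 1024 * 1024) >> 12 = 8192 bytes. */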

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
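
/* Illustrative usage: a device model registers its byte/word/long callbacks
   and then maps the returned token at a guest physical address.  The
   'mydev_*' callbacks, the opaque state and the addresses are hypothetical;
   only the two registration calls come from this file. */
#if 0
static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb, mydev_readw, mydev_readl,
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb, mydev_writew, mydev_writel,
};

static void example_init_mydev(void *dev_state)
{
    int iomemtype;

    iomemtype = cpu_register_io_memory(0, mydev_read, mydev_write, dev_state);
    if (iomemtype < 0)
        return;
    /* map 4 KB of device registers at an arbitrary example address */
    cpu_register_physical_memory(0xe0000000, 0x1000, iomemtype);
}
#endif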

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_raw(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_raw(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit access */
                    val = ldub_raw(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                /* set dirty bit */
                phys_ram_dirty[page >> TARGET_PAGE_BITS] = 1;
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_raw(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_raw(buf, val);
                    l = 2;
                } else {
                    /* 8 bit access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_raw(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#endif
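
/* Illustrative usage: copy a (hypothetical) boot parameter block into guest
   RAM at physical address 0x90000.  The address, buffer and wrapper are
   example values only. */
#if 0
static void example_load_params(const uint8_t *params, int params_size)
{
    cpu_physical_memory_rw(0x90000, (uint8_t *)params, params_size, 1);
}
#endif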

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
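
/* Illustrative usage: this is the routine a debugger stub would use to read
   guest virtual memory, translating page by page through
   cpu_get_phys_page_debug().  The wrapper below is hypothetical. */
#if 0
static int example_peek_guest(CPUState *env, target_ulong vaddr,
                              uint8_t *out, int len)
{
    return cpu_memory_rw_debug(env, vaddr, out, len, 0);
}
#endif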

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif