exec.c @ 023fe10d

/*
2
 *  virtual page mapping and translated block handling
3
 * 
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include "config.h"
21
#include <stdlib.h>
22
#include <stdio.h>
23
#include <stdarg.h>
24
#include <string.h>
25
#include <errno.h>
26
#include <unistd.h>
27
#include <inttypes.h>
28
#if !defined(CONFIG_SOFTMMU)
29
#include <sys/mman.h>
30
#endif
31

    
32
#include "cpu.h"
33
#include "exec-all.h"
34

    
35
//#define DEBUG_TB_INVALIDATE
36
//#define DEBUG_FLUSH
37
//#define DEBUG_TLB
38

    
39
/* make various TB consistency checks */
40
//#define DEBUG_TB_CHECK 
41
//#define DEBUG_TLB_CHECK 
42

    
43
/* threshold to flush the translated code buffer */
44
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
45

    
46
#define SMC_BITMAP_USE_THRESHOLD 10
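/* SMC_BITMAP_USE_THRESHOLD: once a page containing code has seen this
   many write accesses, a bitmap of its translated byte ranges is built
   (see build_page_bitmap) so that later small writes can be filtered
   cheaply */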
47

    
48
#define MMAP_AREA_START        0x00000000
49
#define MMAP_AREA_END          0xa8000000
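/* MMAP_AREA_START/MMAP_AREA_END: host virtual address window used to
   map guest pages directly when the soft MMU is not used (see tlb_flush
   and tlb_set_page below) */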
50

    
51
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
52
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
53
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
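/* tb_hash is indexed by the guest virtual PC, tb_phys_hash by the guest
   physical PC; the physical hash is the one used by tb_link_phys() and
   by the invalidation code below */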
54
int nb_tbs;
55
/* any access to the tbs or the page table must use this lock */
56
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
57

    
58
uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
59
uint8_t *code_gen_ptr;
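/* all generated host code is placed in code_gen_buffer; code_gen_ptr is
   the current allocation pointer and is reset by tb_flush() */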
60

    
61
int phys_ram_size;
62
int phys_ram_fd;
63
uint8_t *phys_ram_base;
64
uint8_t *phys_ram_dirty;
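/* phys_ram_dirty holds one byte per target page of guest RAM; it is
   cleared by cpu_physical_memory_reset_dirty() and set again when a
   page is written (see tlb_set_dirty) */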
65

    
66
typedef struct PageDesc {
67
    /* list of TBs intersecting this ram page */
68
    TranslationBlock *first_tb;
69
    /* in order to optimize self modifying code, we count the number
70
       of lookups we do to a given page to use a bitmap */
71
    unsigned int code_write_count;
72
    uint8_t *code_bitmap;
73
#if defined(CONFIG_USER_ONLY)
74
    unsigned long flags;
75
#endif
76
} PageDesc;
77

    
78
typedef struct PhysPageDesc {
79
    /* offset in host memory of the page + io_index in the low 12 bits */
80
    unsigned long phys_offset;
81
} PhysPageDesc;
82

    
83
typedef struct VirtPageDesc {
84
    /* physical address of code page. It is valid only if 'valid_tag'
85
       matches 'virt_valid_tag' */ 
86
    target_ulong phys_addr; 
87
    unsigned int valid_tag;
88
#if !defined(CONFIG_SOFTMMU)
89
    /* original page access rights. It is valid only if 'valid_tag'
90
       matches 'virt_valid_tag' */
91
    unsigned int prot;
92
#endif
93
} VirtPageDesc;
94

    
95
#define L2_BITS 10
96
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
97

    
98
#define L1_SIZE (1 << L1_BITS)
99
#define L2_SIZE (1 << L2_BITS)
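/* the page descriptor tables are two-level: the top L1_BITS of a page
   index select an entry of l1_map/l1_phys_map/l1_virt_map, which points
   to a dynamically allocated array of L2_SIZE descriptors */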
100

    
101
static void io_mem_init(void);
102

    
103
unsigned long real_host_page_size;
104
unsigned long host_page_bits;
105
unsigned long host_page_size;
106
unsigned long host_page_mask;
107

    
108
/* XXX: for system emulation, it could just be an array */
109
static PageDesc *l1_map[L1_SIZE];
110
static PhysPageDesc *l1_phys_map[L1_SIZE];
111

    
112
#if !defined(CONFIG_USER_ONLY)
113
static VirtPageDesc *l1_virt_map[L1_SIZE];
114
static unsigned int virt_valid_tag;
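/* a VirtPageDesc is only meaningful when its valid_tag equals
   virt_valid_tag, so incrementing the tag in virt_page_flush()
   invalidates all entries at once without walking the tables */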
115
#endif
116

    
117
/* io memory support */
118
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
119
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
120
static int io_mem_nb;
121

    
122
/* log support */
123
char *logfilename = "/tmp/qemu.log";
124
FILE *logfile;
125
int loglevel;
126

    
127
static void page_init(void)
128
{
129
    /* NOTE: we can always suppose that host_page_size >=
130
       TARGET_PAGE_SIZE */
131
#ifdef _WIN32
132
    real_host_page_size = 4096;
133
#else
134
    real_host_page_size = getpagesize();
135
#endif
136
    if (host_page_size == 0)
137
        host_page_size = real_host_page_size;
138
    if (host_page_size < TARGET_PAGE_SIZE)
139
        host_page_size = TARGET_PAGE_SIZE;
140
    host_page_bits = 0;
141
    while ((1 << host_page_bits) < host_page_size)
142
        host_page_bits++;
143
    host_page_mask = ~(host_page_size - 1);
144
#if !defined(CONFIG_USER_ONLY)
145
    virt_valid_tag = 1;
146
#endif
147
}
148

    
149
static inline PageDesc *page_find_alloc(unsigned int index)
150
{
151
    PageDesc **lp, *p;
152

    
153
    lp = &l1_map[index >> L2_BITS];
154
    p = *lp;
155
    if (!p) {
156
        /* allocate if not found */
157
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
158
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
159
        *lp = p;
160
    }
161
    return p + (index & (L2_SIZE - 1));
162
}
163

    
164
static inline PageDesc *page_find(unsigned int index)
165
{
166
    PageDesc *p;
167

    
168
    p = l1_map[index >> L2_BITS];
169
    if (!p)
170
        return 0;
171
    return p + (index & (L2_SIZE - 1));
172
}
173

    
174
static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
175
{
176
    PhysPageDesc **lp, *p;
177

    
178
    lp = &l1_phys_map[index >> L2_BITS];
179
    p = *lp;
180
    if (!p) {
181
        /* allocate if not found */
182
        p = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
183
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
184
        *lp = p;
185
    }
186
    return p + (index & (L2_SIZE - 1));
187
}
188

    
189
static inline PhysPageDesc *phys_page_find(unsigned int index)
190
{
191
    PhysPageDesc *p;
192

    
193
    p = l1_phys_map[index >> L2_BITS];
194
    if (!p)
195
        return 0;
196
    return p + (index & (L2_SIZE - 1));
197
}
198

    
199
#if !defined(CONFIG_USER_ONLY)
200
static void tlb_protect_code(CPUState *env, target_ulong addr);
201
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);
202

    
203
static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
204
{
205
    VirtPageDesc **lp, *p;
206

    
207
    lp = &l1_virt_map[index >> L2_BITS];
208
    p = *lp;
209
    if (!p) {
210
        /* allocate if not found */
211
        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
212
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
213
        *lp = p;
214
    }
215
    return p + (index & (L2_SIZE - 1));
216
}
217

    
218
static inline VirtPageDesc *virt_page_find(unsigned int index)
219
{
220
    VirtPageDesc *p;
221

    
222
    p = l1_virt_map[index >> L2_BITS];
223
    if (!p)
224
        return 0;
225
    return p + (index & (L2_SIZE - 1));
226
}
227

    
228
static void virt_page_flush(void)
229
{
230
    int i, j;
231
    VirtPageDesc *p;
232
    
233
    virt_valid_tag++;
234

    
235
    if (virt_valid_tag == 0) {
236
        virt_valid_tag = 1;
237
        for(i = 0; i < L1_SIZE; i++) {
238
            p = l1_virt_map[i];
239
            if (p) {
240
                for(j = 0; j < L2_SIZE; j++)
241
                    p[j].valid_tag = 0;
242
            }
243
        }
244
    }
245
}
246
#else
247
static void virt_page_flush(void)
248
{
249
}
250
#endif
251

    
252
void cpu_exec_init(void)
253
{
254
    if (!code_gen_ptr) {
255
        code_gen_ptr = code_gen_buffer;
256
        page_init();
257
        io_mem_init();
258
    }
259
}
260

    
261
static inline void invalidate_page_bitmap(PageDesc *p)
262
{
263
    if (p->code_bitmap) {
264
        qemu_free(p->code_bitmap);
265
        p->code_bitmap = NULL;
266
    }
267
    p->code_write_count = 0;
268
}
269

    
270
/* set to NULL all the 'first_tb' fields in all PageDescs */
271
static void page_flush_tb(void)
272
{
273
    int i, j;
274
    PageDesc *p;
275

    
276
    for(i = 0; i < L1_SIZE; i++) {
277
        p = l1_map[i];
278
        if (p) {
279
            for(j = 0; j < L2_SIZE; j++) {
280
                p->first_tb = NULL;
281
                invalidate_page_bitmap(p);
282
                p++;
283
            }
284
        }
285
    }
286
}
287

    
288
/* flush all the translation blocks */
289
/* XXX: tb_flush is currently not thread safe */
290
void tb_flush(CPUState *env)
291
{
292
    int i;
293
#if defined(DEBUG_FLUSH)
294
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n", 
295
           code_gen_ptr - code_gen_buffer, 
296
           nb_tbs, 
297
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
298
#endif
299
    nb_tbs = 0;
300
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
301
        tb_hash[i] = NULL;
302
    virt_page_flush();
303

    
304
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++)
305
        tb_phys_hash[i] = NULL;
306
    page_flush_tb();
307

    
308
    code_gen_ptr = code_gen_buffer;
309
    /* XXX: flush processor icache at this point if cache flush is
310
       expensive */
311
}
312

    
313
#ifdef DEBUG_TB_CHECK
314

    
315
static void tb_invalidate_check(unsigned long address)
316
{
317
    TranslationBlock *tb;
318
    int i;
319
    address &= TARGET_PAGE_MASK;
320
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
321
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
322
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
323
                  address >= tb->pc + tb->size)) {
324
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
325
                       address, tb->pc, tb->size);
326
            }
327
        }
328
    }
329
}
330

    
331
/* verify that all the pages have correct rights for code */
332
static void tb_page_check(void)
333
{
334
    TranslationBlock *tb;
335
    int i, flags1, flags2;
336
    
337
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
338
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
339
            flags1 = page_get_flags(tb->pc);
340
            flags2 = page_get_flags(tb->pc + tb->size - 1);
341
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
342
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
343
                       tb->pc, tb->size, flags1, flags2);
344
            }
345
        }
346
    }
347
}
348

    
349
void tb_jmp_check(TranslationBlock *tb)
350
{
351
    TranslationBlock *tb1;
352
    unsigned int n1;
353

    
354
    /* suppress any remaining jumps to this TB */
355
    tb1 = tb->jmp_first;
356
    for(;;) {
357
        n1 = (long)tb1 & 3;
358
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
359
        if (n1 == 2)
360
            break;
361
        tb1 = tb1->jmp_next[n1];
362
    }
363
    /* check end of list */
364
    if (tb1 != tb) {
365
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
366
    }
367
}
368

    
369
#endif
370

    
371
/* invalidate one TB */
372
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
373
                             int next_offset)
374
{
375
    TranslationBlock *tb1;
376
    for(;;) {
377
        tb1 = *ptb;
378
        if (tb1 == tb) {
379
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
380
            break;
381
        }
382
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
383
    }
384
}
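
/* The TB lists chained through page_next[] and jmp_next[] use tagged
   pointers: the low 2 bits of each link select which slot (0 or 1) of
   the pointed-to TB continues the list, and the value 2 marks the end
   of the circular jump list (a pointer back to the head TB). */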
385

    
386
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
387
{
388
    TranslationBlock *tb1;
389
    unsigned int n1;
390

    
391
    for(;;) {
392
        tb1 = *ptb;
393
        n1 = (long)tb1 & 3;
394
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
395
        if (tb1 == tb) {
396
            *ptb = tb1->page_next[n1];
397
            break;
398
        }
399
        ptb = &tb1->page_next[n1];
400
    }
401
}
402

    
403
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
404
{
405
    TranslationBlock *tb1, **ptb;
406
    unsigned int n1;
407

    
408
    ptb = &tb->jmp_next[n];
409
    tb1 = *ptb;
410
    if (tb1) {
411
        /* find tb(n) in circular list */
412
        for(;;) {
413
            tb1 = *ptb;
414
            n1 = (long)tb1 & 3;
415
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
416
            if (n1 == n && tb1 == tb)
417
                break;
418
            if (n1 == 2) {
419
                ptb = &tb1->jmp_first;
420
            } else {
421
                ptb = &tb1->jmp_next[n1];
422
            }
423
        }
424
        /* now we can suppress tb(n) from the list */
425
        *ptb = tb->jmp_next[n];
426

    
427
        tb->jmp_next[n] = NULL;
428
    }
429
}
430

    
431
/* reset the jump entry 'n' of a TB so that it is not chained to
432
   another TB */
433
static inline void tb_reset_jump(TranslationBlock *tb, int n)
434
{
435
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
436
}
437

    
438
static inline void tb_invalidate(TranslationBlock *tb)
439
{
440
    unsigned int h, n1;
441
    TranslationBlock *tb1, *tb2, **ptb;
442
    
443
    tb_invalidated_flag = 1;
444

    
445
    /* remove the TB from the hash list */
446
    h = tb_hash_func(tb->pc);
447
    ptb = &tb_hash[h];
448
    for(;;) {
449
        tb1 = *ptb;
450
        /* NOTE: the TB is not necessarily linked in the hash; if it is
           not, it simply is not currently in use */
452
        if (tb1 == NULL)
453
            return;
454
        if (tb1 == tb) {
455
            *ptb = tb1->hash_next;
456
            break;
457
        }
458
        ptb = &tb1->hash_next;
459
    }
460

    
461
    /* suppress this TB from the two jump lists */
462
    tb_jmp_remove(tb, 0);
463
    tb_jmp_remove(tb, 1);
464

    
465
    /* suppress any remaining jumps to this TB */
466
    tb1 = tb->jmp_first;
467
    for(;;) {
468
        n1 = (long)tb1 & 3;
469
        if (n1 == 2)
470
            break;
471
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
472
        tb2 = tb1->jmp_next[n1];
473
        tb_reset_jump(tb1, n1);
474
        tb1->jmp_next[n1] = NULL;
475
        tb1 = tb2;
476
    }
477
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
478
}
479

    
480
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
481
{
482
    PageDesc *p;
483
    unsigned int h;
484
    target_ulong phys_pc;
485
    
486
    /* remove the TB from the hash list */
487
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
488
    h = tb_phys_hash_func(phys_pc);
489
    tb_remove(&tb_phys_hash[h], tb, 
490
              offsetof(TranslationBlock, phys_hash_next));
491

    
492
    /* remove the TB from the page list */
493
    if (tb->page_addr[0] != page_addr) {
494
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
495
        tb_page_remove(&p->first_tb, tb);
496
        invalidate_page_bitmap(p);
497
    }
498
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
499
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
500
        tb_page_remove(&p->first_tb, tb);
501
        invalidate_page_bitmap(p);
502
    }
503

    
504
    tb_invalidate(tb);
505
}
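
/* set bits [start, start + len) in the bit array 'tab'; for example,
   set_bits(tab, 3, 6) sets bits 3..8. It is used to mark the bytes of
   a page that are covered by translated code. */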
506

    
507
static inline void set_bits(uint8_t *tab, int start, int len)
508
{
509
    int end, mask, end1;
510

    
511
    end = start + len;
512
    tab += start >> 3;
513
    mask = 0xff << (start & 7);
514
    if ((start & ~7) == (end & ~7)) {
515
        if (start < end) {
516
            mask &= ~(0xff << (end & 7));
517
            *tab |= mask;
518
        }
519
    } else {
520
        *tab++ |= mask;
521
        start = (start + 8) & ~7;
522
        end1 = end & ~7;
523
        while (start < end1) {
524
            *tab++ = 0xff;
525
            start += 8;
526
        }
527
        if (start < end) {
528
            mask = ~(0xff << (end & 7));
529
            *tab |= mask;
530
        }
531
    }
532
}
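
/* build the bitmap of page bytes covered by TBs so that
   tb_invalidate_phys_page_fast() can ignore writes that do not touch
   translated code */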
533

    
534
static void build_page_bitmap(PageDesc *p)
535
{
536
    int n, tb_start, tb_end;
537
    TranslationBlock *tb;
538
    
539
    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
540
    if (!p->code_bitmap)
541
        return;
542
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
543

    
544
    tb = p->first_tb;
545
    while (tb != NULL) {
546
        n = (long)tb & 3;
547
        tb = (TranslationBlock *)((long)tb & ~3);
548
        /* NOTE: this is subtle as a TB may span two physical pages */
549
        if (n == 0) {
550
            /* NOTE: tb_end may be after the end of the page, but
551
               it is not a problem */
552
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
553
            tb_end = tb_start + tb->size;
554
            if (tb_end > TARGET_PAGE_SIZE)
555
                tb_end = TARGET_PAGE_SIZE;
556
        } else {
557
            tb_start = 0;
558
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
559
        }
560
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
561
        tb = tb->page_next[n];
562
    }
563
}
564

    
565
#ifdef TARGET_HAS_PRECISE_SMC
566

    
567
static void tb_gen_code(CPUState *env, 
568
                        target_ulong pc, target_ulong cs_base, int flags,
569
                        int cflags)
570
{
571
    TranslationBlock *tb;
572
    uint8_t *tc_ptr;
573
    target_ulong phys_pc, phys_page2, virt_page2;
574
    int code_gen_size;
575

    
576
    phys_pc = get_phys_addr_code(env, (unsigned long)pc);
577
    tb = tb_alloc((unsigned long)pc);
578
    if (!tb) {
579
        /* flush must be done */
580
        tb_flush(env);
581
        /* cannot fail at this point */
582
        tb = tb_alloc((unsigned long)pc);
583
    }
584
    tc_ptr = code_gen_ptr;
585
    tb->tc_ptr = tc_ptr;
586
    tb->cs_base = cs_base;
587
    tb->flags = flags;
588
    tb->cflags = cflags;
589
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
590
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
591
    
592
    /* check next page if needed */
593
    virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
594
    phys_page2 = -1;
595
    if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
596
        phys_page2 = get_phys_addr_code(env, virt_page2);
597
    }
598
    tb_link_phys(tb, phys_pc, phys_page2);
599
}
600
#endif
601
    
602
/* invalidate all TBs which intersect with the target physical page
603
   starting in range [start;end[. NOTE: start and end must refer to
604
   the same physical page. 'is_cpu_write_access' should be true if called
605
   from a real cpu write access: the virtual CPU will exit the current
606
   TB if code is modified inside this TB. */
607
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end, 
608
                                   int is_cpu_write_access)
609
{
610
    int n, current_tb_modified, current_tb_not_found, current_flags;
611
#if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
612
    CPUState *env = cpu_single_env;
613
#endif
614
    PageDesc *p;
615
    TranslationBlock *tb, *tb_next, *current_tb;
616
    target_ulong tb_start, tb_end;
617
    target_ulong current_pc, current_cs_base;
618

    
619
    p = page_find(start >> TARGET_PAGE_BITS);
620
    if (!p) 
621
        return;
622
    if (!p->code_bitmap && 
623
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
624
        is_cpu_write_access) {
625
        /* build code bitmap */
626
        build_page_bitmap(p);
627
    }
628

    
629
    /* we remove all the TBs in the range [start, end[ */
630
    /* XXX: see if in some cases it could be faster to invalidate all the code */
631
    current_tb_not_found = is_cpu_write_access;
632
    current_tb_modified = 0;
633
    current_tb = NULL; /* avoid warning */
634
    current_pc = 0; /* avoid warning */
635
    current_cs_base = 0; /* avoid warning */
636
    current_flags = 0; /* avoid warning */
637
    tb = p->first_tb;
638
    while (tb != NULL) {
639
        n = (long)tb & 3;
640
        tb = (TranslationBlock *)((long)tb & ~3);
641
        tb_next = tb->page_next[n];
642
        /* NOTE: this is subtle as a TB may span two physical pages */
643
        if (n == 0) {
644
            /* NOTE: tb_end may be after the end of the page, but
645
               it is not a problem */
646
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
647
            tb_end = tb_start + tb->size;
648
        } else {
649
            tb_start = tb->page_addr[1];
650
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
651
        }
652
        if (!(tb_end <= start || tb_start >= end)) {
653
#ifdef TARGET_HAS_PRECISE_SMC
654
            if (current_tb_not_found) {
655
                current_tb_not_found = 0;
656
                current_tb = NULL;
657
                if (env->mem_write_pc) {
658
                    /* now we have a real cpu fault */
659
                    current_tb = tb_find_pc(env->mem_write_pc);
660
                }
661
            }
662
            if (current_tb == tb &&
663
                !(current_tb->cflags & CF_SINGLE_INSN)) {
664
                /* If we are modifying the current TB, we must stop
665
                its execution. We could be more precise by checking
666
                that the modification is after the current PC, but it
667
                would require a specialized function to partially
668
                restore the CPU state */
669
                
670
                current_tb_modified = 1;
671
                cpu_restore_state(current_tb, env, 
672
                                  env->mem_write_pc, NULL);
673
#if defined(TARGET_I386)
674
                current_flags = env->hflags;
675
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
676
                current_cs_base = (target_ulong)env->segs[R_CS].base;
677
                current_pc = current_cs_base + env->eip;
678
#else
679
#error unsupported CPU
680
#endif
681
            }
682
#endif /* TARGET_HAS_PRECISE_SMC */
683
            tb_phys_invalidate(tb, -1);
684
        }
685
        tb = tb_next;
686
    }
687
#if !defined(CONFIG_USER_ONLY)
688
    /* if no code remaining, no need to continue to use slow writes */
689
    if (!p->first_tb) {
690
        invalidate_page_bitmap(p);
691
        if (is_cpu_write_access) {
692
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
693
        }
694
    }
695
#endif
696
#ifdef TARGET_HAS_PRECISE_SMC
697
    if (current_tb_modified) {
698
        /* we generate a block containing just the instruction
699
           modifying the memory. It will ensure that it cannot modify
700
           itself */
701
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 
702
                    CF_SINGLE_INSN);
703
        cpu_resume_from_signal(env, NULL);
704
    }
705
#endif
706
}
707

    
708
/* len must be <= 8 and start must be a multiple of len */
709
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
710
{
711
    PageDesc *p;
712
    int offset, b;
713
#if 0
714
    if (cpu_single_env->cr[0] & CR0_PE_MASK) {
715
        printf("modifying code at 0x%x size=%d EIP=%x\n", 
716
               (vaddr & TARGET_PAGE_MASK) | (start & ~TARGET_PAGE_MASK), len, 
717
               cpu_single_env->eip);
718
    }
719
#endif
720
    p = page_find(start >> TARGET_PAGE_BITS);
721
    if (!p) 
722
        return;
723
    if (p->code_bitmap) {
724
        offset = start & ~TARGET_PAGE_MASK;
725
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
726
        if (b & ((1 << len) - 1))
727
            goto do_invalidate;
728
    } else {
729
    do_invalidate:
730
        tb_invalidate_phys_page_range(start, start + len, 1);
731
    }
732
}
733

    
734
#if !defined(CONFIG_SOFTMMU)
735
static void tb_invalidate_phys_page(target_ulong addr, 
736
                                    unsigned long pc, void *puc)
737
{
738
    int n, current_flags, current_tb_modified;
739
    target_ulong current_pc, current_cs_base;
740
    PageDesc *p;
741
    TranslationBlock *tb, *current_tb;
742
#ifdef TARGET_HAS_PRECISE_SMC
743
    CPUState *env = cpu_single_env;
744
#endif
745

    
746
    addr &= TARGET_PAGE_MASK;
747
    p = page_find(addr >> TARGET_PAGE_BITS);
748
    if (!p) 
749
        return;
750
    tb = p->first_tb;
751
    current_tb_modified = 0;
752
    current_tb = NULL;
753
    current_pc = 0; /* avoid warning */
754
    current_cs_base = 0; /* avoid warning */
755
    current_flags = 0; /* avoid warning */
756
#ifdef TARGET_HAS_PRECISE_SMC
757
    if (tb && pc != 0) {
758
        current_tb = tb_find_pc(pc);
759
    }
760
#endif
761
    while (tb != NULL) {
762
        n = (long)tb & 3;
763
        tb = (TranslationBlock *)((long)tb & ~3);
764
#ifdef TARGET_HAS_PRECISE_SMC
765
        if (current_tb == tb &&
766
            !(current_tb->cflags & CF_SINGLE_INSN)) {
767
                /* If we are modifying the current TB, we must stop
768
                   its execution. We could be more precise by checking
769
                   that the modification is after the current PC, but it
770
                   would require a specialized function to partially
771
                   restore the CPU state */
772
            
773
            current_tb_modified = 1;
774
            cpu_restore_state(current_tb, env, pc, puc);
775
#if defined(TARGET_I386)
776
            current_flags = env->hflags;
777
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
778
            current_cs_base = (target_ulong)env->segs[R_CS].base;
779
            current_pc = current_cs_base + env->eip;
780
#else
781
#error unsupported CPU
782
#endif
783
        }
784
#endif /* TARGET_HAS_PRECISE_SMC */
785
        tb_phys_invalidate(tb, addr);
786
        tb = tb->page_next[n];
787
    }
788
    p->first_tb = NULL;
789
#ifdef TARGET_HAS_PRECISE_SMC
790
    if (current_tb_modified) {
791
        /* we generate a block containing just the instruction
792
           modifying the memory. It will ensure that it cannot modify
793
           itself */
794
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 
795
                    CF_SINGLE_INSN);
796
        cpu_resume_from_signal(env, puc);
797
    }
798
#endif
799
}
800
#endif
801

    
802
/* add the tb in the target page and protect it if necessary */
803
static inline void tb_alloc_page(TranslationBlock *tb, 
804
                                 unsigned int n, unsigned int page_addr)
805
{
806
    PageDesc *p;
807
    TranslationBlock *last_first_tb;
808

    
809
    tb->page_addr[n] = page_addr;
810
    p = page_find(page_addr >> TARGET_PAGE_BITS);
811
    tb->page_next[n] = p->first_tb;
812
    last_first_tb = p->first_tb;
813
    p->first_tb = (TranslationBlock *)((long)tb | n);
814
    invalidate_page_bitmap(p);
815

    
816
#ifdef TARGET_HAS_SMC
817

    
818
#if defined(CONFIG_USER_ONLY)
819
    if (p->flags & PAGE_WRITE) {
820
        unsigned long host_start, host_end, addr;
821
        int prot;
822

    
823
        /* force the host page as non writable (writes will have a
824
           page fault + mprotect overhead) */
825
        host_start = page_addr & host_page_mask;
826
        host_end = host_start + host_page_size;
827
        prot = 0;
828
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
829
            prot |= page_get_flags(addr);
830
        mprotect((void *)host_start, host_page_size, 
831
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
832
#ifdef DEBUG_TB_INVALIDATE
833
        printf("protecting code page: 0x%08lx\n", 
834
               host_start);
835
#endif
836
        p->flags &= ~PAGE_WRITE;
837
    }
838
#else
839
    /* if some code is already present, then the pages are already
840
       protected. So we handle the case where only the first TB is
841
       allocated in a physical page */
842
    if (!last_first_tb) {
843
        target_ulong virt_addr;
844

    
845
        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
846
        tlb_protect_code(cpu_single_env, virt_addr);        
847
    }
848
#endif
849

    
850
#endif /* TARGET_HAS_SMC */
851
}
852

    
853
/* Allocate a new translation block. Flush the translation buffer if
854
   too many translation blocks or too much generated code. */
855
TranslationBlock *tb_alloc(unsigned long pc)
856
{
857
    TranslationBlock *tb;
858

    
859
    if (nb_tbs >= CODE_GEN_MAX_BLOCKS || 
860
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
861
        return NULL;
862
    tb = &tbs[nb_tbs++];
863
    tb->pc = pc;
864
    tb->cflags = 0;
865
    return tb;
866
}
867

    
868
/* add a new TB and link it to the physical page tables. phys_page2 is
869
   (-1) to indicate that only one page contains the TB. */
870
void tb_link_phys(TranslationBlock *tb, 
871
                  target_ulong phys_pc, target_ulong phys_page2)
872
{
873
    unsigned int h;
874
    TranslationBlock **ptb;
875

    
876
    /* add in the physical hash table */
877
    h = tb_phys_hash_func(phys_pc);
878
    ptb = &tb_phys_hash[h];
879
    tb->phys_hash_next = *ptb;
880
    *ptb = tb;
881

    
882
    /* add in the page list */
883
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
884
    if (phys_page2 != -1)
885
        tb_alloc_page(tb, 1, phys_page2);
886
    else
887
        tb->page_addr[1] = -1;
888
#ifdef DEBUG_TB_CHECK
889
    tb_page_check();
890
#endif
891
}
892

    
893
/* link the tb with the other TBs */
894
void tb_link(TranslationBlock *tb)
895
{
896
#if !defined(CONFIG_USER_ONLY)
897
    {
898
        VirtPageDesc *vp;
899
        target_ulong addr;
900
        
901
        /* save the code memory mappings (needed to invalidate the code) */
902
        addr = tb->pc & TARGET_PAGE_MASK;
903
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
904
#ifdef DEBUG_TLB_CHECK 
905
        if (vp->valid_tag == virt_valid_tag &&
906
            vp->phys_addr != tb->page_addr[0]) {
907
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
908
                   addr, tb->page_addr[0], vp->phys_addr);
909
        }
910
#endif
911
        vp->phys_addr = tb->page_addr[0];
912
        if (vp->valid_tag != virt_valid_tag) {
913
            vp->valid_tag = virt_valid_tag;
914
#if !defined(CONFIG_SOFTMMU)
915
            vp->prot = 0;
916
#endif
917
        }
918
        
919
        if (tb->page_addr[1] != -1) {
920
            addr += TARGET_PAGE_SIZE;
921
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
922
#ifdef DEBUG_TLB_CHECK 
923
            if (vp->valid_tag == virt_valid_tag &&
924
                vp->phys_addr != tb->page_addr[1]) { 
925
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
926
                       addr, tb->page_addr[1], vp->phys_addr);
927
            }
928
#endif
929
            vp->phys_addr = tb->page_addr[1];
930
            if (vp->valid_tag != virt_valid_tag) {
931
                vp->valid_tag = virt_valid_tag;
932
#if !defined(CONFIG_SOFTMMU)
933
                vp->prot = 0;
934
#endif
935
            }
936
        }
937
    }
938
#endif
939

    
940
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
941
    tb->jmp_next[0] = NULL;
942
    tb->jmp_next[1] = NULL;
943
#ifdef USE_CODE_COPY
944
    tb->cflags &= ~CF_FP_USED;
945
    if (tb->cflags & CF_TB_FP_USED)
946
        tb->cflags |= CF_FP_USED;
947
#endif
948

    
949
    /* init original jump addresses */
950
    if (tb->tb_next_offset[0] != 0xffff)
951
        tb_reset_jump(tb, 0);
952
    if (tb->tb_next_offset[1] != 0xffff)
953
        tb_reset_jump(tb, 1);
954
}
955

    
956
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
957
   tb[1].tc_ptr. Return NULL if not found */
958
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
959
{
960
    int m_min, m_max, m;
961
    unsigned long v;
962
    TranslationBlock *tb;
963

    
964
    if (nb_tbs <= 0)
965
        return NULL;
966
    if (tc_ptr < (unsigned long)code_gen_buffer ||
967
        tc_ptr >= (unsigned long)code_gen_ptr)
968
        return NULL;
969
    /* binary search (cf Knuth) */
970
    m_min = 0;
971
    m_max = nb_tbs - 1;
972
    while (m_min <= m_max) {
973
        m = (m_min + m_max) >> 1;
974
        tb = &tbs[m];
975
        v = (unsigned long)tb->tc_ptr;
976
        if (v == tc_ptr)
977
            return tb;
978
        else if (tc_ptr < v) {
979
            m_max = m - 1;
980
        } else {
981
            m_min = m + 1;
982
        }
983
    } 
984
    return &tbs[m_max];
985
}
986

    
987
static void tb_reset_jump_recursive(TranslationBlock *tb);
988

    
989
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
990
{
991
    TranslationBlock *tb1, *tb_next, **ptb;
992
    unsigned int n1;
993

    
994
    tb1 = tb->jmp_next[n];
995
    if (tb1 != NULL) {
996
        /* find head of list */
997
        for(;;) {
998
            n1 = (long)tb1 & 3;
999
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1000
            if (n1 == 2)
1001
                break;
1002
            tb1 = tb1->jmp_next[n1];
1003
        }
1004
        /* we are now sure that tb jumps to tb1 */
1005
        tb_next = tb1;
1006

    
1007
        /* remove tb from the jmp_first list */
1008
        ptb = &tb_next->jmp_first;
1009
        for(;;) {
1010
            tb1 = *ptb;
1011
            n1 = (long)tb1 & 3;
1012
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1013
            if (n1 == n && tb1 == tb)
1014
                break;
1015
            ptb = &tb1->jmp_next[n1];
1016
        }
1017
        *ptb = tb->jmp_next[n];
1018
        tb->jmp_next[n] = NULL;
1019
        
1020
        /* suppress the jump to next tb in generated code */
1021
        tb_reset_jump(tb, n);
1022

    
1023
        /* suppress jumps in the tb on which we could have jumped */
1024
        tb_reset_jump_recursive(tb_next);
1025
    }
1026
}
1027

    
1028
static void tb_reset_jump_recursive(TranslationBlock *tb)
1029
{
1030
    tb_reset_jump_recursive2(tb, 0);
1031
    tb_reset_jump_recursive2(tb, 1);
1032
}
1033

    
1034
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1035
{
1036
    target_ulong phys_addr;
1037

    
1038
    phys_addr = cpu_get_phys_page_debug(env, pc);
1039
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
1040
}
1041

    
1042
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1043
   breakpoint is reached */
1044
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1045
{
1046
#if defined(TARGET_I386) || defined(TARGET_PPC)
1047
    int i;
1048
    
1049
    for(i = 0; i < env->nb_breakpoints; i++) {
1050
        if (env->breakpoints[i] == pc)
1051
            return 0;
1052
    }
1053

    
1054
    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1055
        return -1;
1056
    env->breakpoints[env->nb_breakpoints++] = pc;
1057
    
1058
    breakpoint_invalidate(env, pc);
1059
    return 0;
1060
#else
1061
    return -1;
1062
#endif
1063
}
1064

    
1065
/* remove a breakpoint */
1066
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1067
{
1068
#if defined(TARGET_I386) || defined(TARGET_PPC)
1069
    int i;
1070
    for(i = 0; i < env->nb_breakpoints; i++) {
1071
        if (env->breakpoints[i] == pc)
1072
            goto found;
1073
    }
1074
    return -1;
1075
 found:
1076
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
1077
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
1078
    env->nb_breakpoints--;
1079

    
1080
    breakpoint_invalidate(env, pc);
1081
    return 0;
1082
#else
1083
    return -1;
1084
#endif
1085
}
1086

    
1087
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1088
   CPU loop after each instruction */
1089
void cpu_single_step(CPUState *env, int enabled)
1090
{
1091
#if defined(TARGET_I386) || defined(TARGET_PPC)
1092
    if (env->singlestep_enabled != enabled) {
1093
        env->singlestep_enabled = enabled;
1094
        /* must flush all the translated code to avoid inconsistencies */
1095
        /* XXX: only flush what is necessary */
1096
        tb_flush(env);
1097
    }
1098
#endif
1099
}
1100

    
1101
/* enable or disable low levels log */
1102
void cpu_set_log(int log_flags)
1103
{
1104
    loglevel = log_flags;
1105
    if (loglevel && !logfile) {
1106
        logfile = fopen(logfilename, "w");
1107
        if (!logfile) {
1108
            perror(logfilename);
1109
            _exit(1);
1110
        }
1111
#if !defined(CONFIG_SOFTMMU)
1112
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1113
        {
1114
            static uint8_t logfile_buf[4096];
1115
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1116
        }
1117
#else
1118
        setvbuf(logfile, NULL, _IOLBF, 0);
1119
#endif
1120
    }
1121
}
1122

    
1123
void cpu_set_log_filename(const char *filename)
1124
{
1125
    logfilename = strdup(filename);
1126
}
1127

    
1128
/* mask must never be zero, except for A20 change call */
1129
void cpu_interrupt(CPUState *env, int mask)
1130
{
1131
    TranslationBlock *tb;
1132
    static int interrupt_lock;
1133

    
1134
    env->interrupt_request |= mask;
1135
    /* if the cpu is currently executing code, we must unlink it and
1136
       all the potentially executing TB */
1137
    tb = env->current_tb;
1138
    if (tb && !testandset(&interrupt_lock)) {
1139
        env->current_tb = NULL;
1140
        tb_reset_jump_recursive(tb);
1141
        interrupt_lock = 0;
1142
    }
1143
}
1144

    
1145
void cpu_reset_interrupt(CPUState *env, int mask)
1146
{
1147
    env->interrupt_request &= ~mask;
1148
}
1149

    
1150
CPULogItem cpu_log_items[] = {
1151
    { CPU_LOG_TB_OUT_ASM, "out_asm", 
1152
      "show generated host assembly code for each compiled TB" },
1153
    { CPU_LOG_TB_IN_ASM, "in_asm",
1154
      "show target assembly code for each compiled TB" },
1155
    { CPU_LOG_TB_OP, "op", 
1156
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1157
#ifdef TARGET_I386
1158
    { CPU_LOG_TB_OP_OPT, "op_opt",
1159
      "show micro ops after optimization for each compiled TB" },
1160
#endif
1161
    { CPU_LOG_INT, "int",
1162
      "show interrupts/exceptions in short format" },
1163
    { CPU_LOG_EXEC, "exec",
1164
      "show trace before each executed TB (lots of logs)" },
1165
    { CPU_LOG_TB_CPU, "cpu",
1166
      "show CPU state before bloc translation" },
1167
#ifdef TARGET_I386
1168
    { CPU_LOG_PCALL, "pcall",
1169
      "show protected mode far calls/returns/exceptions" },
1170
#endif
1171
    { CPU_LOG_IOPORT, "ioport",
1172
      "show all i/o ports accesses" },
1173
    { 0, NULL, NULL },
1174
};
1175

    
1176
static int cmp1(const char *s1, int n, const char *s2)
1177
{
1178
    if (strlen(s2) != n)
1179
        return 0;
1180
    return memcmp(s1, s2, n) == 0;
1181
}
1182
      
1183
/* takes a comma separated list of log masks. Return 0 if error. */
1184
int cpu_str_to_log_mask(const char *str)
1185
{
1186
    CPULogItem *item;
1187
    int mask;
1188
    const char *p, *p1;
1189

    
1190
    p = str;
1191
    mask = 0;
1192
    for(;;) {
1193
        p1 = strchr(p, ',');
1194
        if (!p1)
1195
            p1 = p + strlen(p);
1196
        for(item = cpu_log_items; item->mask != 0; item++) {
1197
            if (cmp1(p, p1 - p, item->name))
1198
                goto found;
1199
        }
1200
        return 0;
1201
    found:
1202
        mask |= item->mask;
1203
        if (*p1 != ',')
1204
            break;
1205
        p = p1 + 1;
1206
    }
1207
    return mask;
1208
}
1209

    
1210
void cpu_abort(CPUState *env, const char *fmt, ...)
1211
{
1212
    va_list ap;
1213

    
1214
    va_start(ap, fmt);
1215
    fprintf(stderr, "qemu: fatal: ");
1216
    vfprintf(stderr, fmt, ap);
1217
    fprintf(stderr, "\n");
1218
#ifdef TARGET_I386
1219
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
1220
#endif
1221
    va_end(ap);
1222
    abort();
1223
}
1224

    
1225
#if !defined(CONFIG_USER_ONLY)
1226

    
1227
/* NOTE: if flush_global is true, also flush global entries (not
1228
   implemented yet) */
1229
void tlb_flush(CPUState *env, int flush_global)
1230
{
1231
    int i;
1232

    
1233
#if defined(DEBUG_TLB)
1234
    printf("tlb_flush:\n");
1235
#endif
1236
    /* must reset current TB so that interrupts cannot modify the
1237
       links while we are modifying them */
1238
    env->current_tb = NULL;
1239

    
1240
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1241
        env->tlb_read[0][i].address = -1;
1242
        env->tlb_write[0][i].address = -1;
1243
        env->tlb_read[1][i].address = -1;
1244
        env->tlb_write[1][i].address = -1;
1245
    }
1246

    
1247
    virt_page_flush();
1248
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
1249
        tb_hash[i] = NULL;
1250

    
1251
#if !defined(CONFIG_SOFTMMU)
1252
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1253
#endif
1254
}
1255

    
1256
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1257
{
1258
    if (addr == (tlb_entry->address & 
1259
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
1260
        tlb_entry->address = -1;
1261
}
1262

    
1263
void tlb_flush_page(CPUState *env, target_ulong addr)
1264
{
1265
    int i, n;
1266
    VirtPageDesc *vp;
1267
    PageDesc *p;
1268
    TranslationBlock *tb;
1269

    
1270
#if defined(DEBUG_TLB)
1271
    printf("tlb_flush_page: 0x%08x\n", addr);
1272
#endif
1273
    /* must reset current TB so that interrupts cannot modify the
1274
       links while we are modifying them */
1275
    env->current_tb = NULL;
1276

    
1277
    addr &= TARGET_PAGE_MASK;
1278
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1279
    tlb_flush_entry(&env->tlb_read[0][i], addr);
1280
    tlb_flush_entry(&env->tlb_write[0][i], addr);
1281
    tlb_flush_entry(&env->tlb_read[1][i], addr);
1282
    tlb_flush_entry(&env->tlb_write[1][i], addr);
1283

    
1284
    /* remove from the virtual pc hash table all the TB at this
1285
       virtual address */
1286
    
1287
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1288
    if (vp && vp->valid_tag == virt_valid_tag) {
1289
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
1290
        if (p) {
1291
            /* we remove all the links to the TBs in this virtual page */
1292
            tb = p->first_tb;
1293
            while (tb != NULL) {
1294
                n = (long)tb & 3;
1295
                tb = (TranslationBlock *)((long)tb & ~3);
1296
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
1297
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
1298
                    tb_invalidate(tb);
1299
                }
1300
                tb = tb->page_next[n];
1301
            }
1302
        }
1303
        vp->valid_tag = 0;
1304
    }
1305

    
1306
#if !defined(CONFIG_SOFTMMU)
1307
    if (addr < MMAP_AREA_END)
1308
        munmap((void *)addr, TARGET_PAGE_SIZE);
1309
#endif
1310
}
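
/* Self-modifying code support: writes to a page that contains
   translated code are redirected to the IO_MEM_CODE handler by
   rewriting the write TLB entry, so the write takes the slow path where
   the affected TBs can be invalidated. */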
1311

    
1312
static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
1313
{
1314
    if (addr == (tlb_entry->address & 
1315
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
1316
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
1317
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
1318
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
1319
    }
1320
}
1321

    
1322
/* update the TLBs so that writes to code in the virtual page 'addr'
1323
   can be detected */
1324
static void tlb_protect_code(CPUState *env, target_ulong addr)
1325
{
1326
    int i;
1327

    
1328
    addr &= TARGET_PAGE_MASK;
1329
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1330
    tlb_protect_code1(&env->tlb_write[0][i], addr);
1331
    tlb_protect_code1(&env->tlb_write[1][i], addr);
1332
#if !defined(CONFIG_SOFTMMU)
1333
    /* NOTE: as we generated the code for this page, it is already at
1334
       least readable */
1335
    if (addr < MMAP_AREA_END)
1336
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
1337
#endif
1338
}
1339

    
1340
static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry, 
1341
                                       unsigned long phys_addr)
1342
{
1343
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
1344
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
1345
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1346
    }
1347
}
1348

    
1349
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1350
   tested for self-modifying code */
1351
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
1352
{
1353
    int i;
1354

    
1355
    phys_addr &= TARGET_PAGE_MASK;
1356
    phys_addr += (long)phys_ram_base;
1357
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1358
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
1359
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
1360
}
1361

    
1362
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, 
1363
                                         unsigned long start, unsigned long length)
1364
{
1365
    unsigned long addr;
1366
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1367
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1368
        if ((addr - start) < length) {
1369
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1370
        }
1371
    }
1372
}
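
/* Dirty RAM tracking: cpu_physical_memory_reset_dirty() clears the
   dirty bytes for a range and switches the matching RAM write TLB
   entries to IO_MEM_NOTDIRTY, so the next write to such a page takes
   the slow path and marks it dirty again (see tlb_set_dirty). */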
1373

    
1374
void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
1375
{
1376
    CPUState *env;
1377
    unsigned long length, start1;
1378
    int i;
1379

    
1380
    start &= TARGET_PAGE_MASK;
1381
    end = TARGET_PAGE_ALIGN(end);
1382

    
1383
    length = end - start;
1384
    if (length == 0)
1385
        return;
1386
    memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);
1387

    
1388
    env = cpu_single_env;
1389
    /* we modify the TLB cache so that the dirty bit will be set again
1390
       when accessing the range */
1391
    start1 = start + (unsigned long)phys_ram_base;
1392
    for(i = 0; i < CPU_TLB_SIZE; i++)
1393
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
1394
    for(i = 0; i < CPU_TLB_SIZE; i++)
1395
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
1396

    
1397
#if !defined(CONFIG_SOFTMMU)
1398
    /* XXX: this is expensive */
1399
    {
1400
        VirtPageDesc *p;
1401
        int j;
1402
        target_ulong addr;
1403

    
1404
        for(i = 0; i < L1_SIZE; i++) {
1405
            p = l1_virt_map[i];
1406
            if (p) {
1407
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
1408
                for(j = 0; j < L2_SIZE; j++) {
1409
                    if (p->valid_tag == virt_valid_tag &&
1410
                        p->phys_addr >= start && p->phys_addr < end &&
1411
                        (p->prot & PROT_WRITE)) {
1412
                        if (addr < MMAP_AREA_END) {
1413
                            mprotect((void *)addr, TARGET_PAGE_SIZE, 
1414
                                     p->prot & ~PROT_WRITE);
1415
                        }
1416
                    }
1417
                    addr += TARGET_PAGE_SIZE;
1418
                    p++;
1419
                }
1420
            }
1421
        }
1422
    }
1423
#endif
1424
}
1425

    
1426
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, 
1427
                                    unsigned long start)
1428
{
1429
    unsigned long addr;
1430
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1431
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1432
        if (addr == start) {
1433
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
1434
        }
1435
    }
1436
}
1437

    
1438
/* update the TLB corresponding to virtual page vaddr and phys addr
1439
   addr so that it is no longer dirty */
1440
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
1441
{
1442
    CPUState *env = cpu_single_env;
1443
    int i;
1444

    
1445
    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;
1446

    
1447
    addr &= TARGET_PAGE_MASK;
1448
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1449
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
1450
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
1451
}
1452

    
1453
/* add a new TLB entry. At most one entry for a given virtual address
1454
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1455
   (can only happen in non SOFTMMU mode for I/O pages or pages
1456
   conflicting with the host address space). */
1457
int tlb_set_page(CPUState *env, target_ulong vaddr, 
1458
                 target_phys_addr_t paddr, int prot, 
1459
                 int is_user, int is_softmmu)
1460
{
1461
    PhysPageDesc *p;
1462
    unsigned long pd;
1463
    TranslationBlock *first_tb;
1464
    unsigned int index;
1465
    target_ulong address;
1466
    unsigned long addend;
1467
    int ret;
1468

    
1469
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1470
    first_tb = NULL;
1471
    if (!p) {
1472
        pd = IO_MEM_UNASSIGNED;
1473
    } else {
1474
        PageDesc *p1;
1475
        pd = p->phys_offset;
1476
        if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1477
            /* NOTE: we also allocate the page at this stage */
1478
            p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
1479
            first_tb = p1->first_tb;
1480
        }
1481
    }
1482
#if defined(DEBUG_TLB)
1483
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
1484
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
1485
#endif
1486

    
1487
    ret = 0;
1488
#if !defined(CONFIG_SOFTMMU)
1489
    if (is_softmmu) 
1490
#endif
1491
    {
1492
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1493
            /* IO memory case */
1494
            address = vaddr | pd;
1495
            addend = paddr;
1496
        } else {
1497
            /* standard memory */
1498
            address = vaddr;
1499
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1500
        }
1501
        
1502
        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
1503
        addend -= vaddr;
1504
        if (prot & PAGE_READ) {
1505
            env->tlb_read[is_user][index].address = address;
1506
            env->tlb_read[is_user][index].addend = addend;
1507
        } else {
1508
            env->tlb_read[is_user][index].address = -1;
1509
            env->tlb_read[is_user][index].addend = -1;
1510
        }
1511
        if (prot & PAGE_WRITE) {
1512
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
1513
                /* ROM: access is ignored (same as unassigned) */
1514
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
1515
                env->tlb_write[is_user][index].addend = addend;
1516
            } else 
1517
                /* XXX: the PowerPC code seems not ready to handle
1518
                   self modifying code with DCBI */
1519
#if defined(TARGET_HAS_SMC) || 1
1520
            if (first_tb) {
1521
                /* if code is present, we use a specific memory
1522
                   handler. It works only for physical memory access */
1523
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
1524
                env->tlb_write[is_user][index].addend = addend;
1525
            } else 
1526
#endif
1527
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && 
1528
                       !cpu_physical_memory_is_dirty(pd)) {
1529
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
1530
                env->tlb_write[is_user][index].addend = addend;
1531
            } else {
1532
                env->tlb_write[is_user][index].address = address;
1533
                env->tlb_write[is_user][index].addend = addend;
1534
            }
1535
        } else {
1536
            env->tlb_write[is_user][index].address = -1;
1537
            env->tlb_write[is_user][index].addend = -1;
1538
        }
1539
    }
1540
#if !defined(CONFIG_SOFTMMU)
1541
    else {
1542
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1543
            /* IO access: no mapping is done as it will be handled by the
1544
               soft MMU */
1545
            if (!(env->hflags & HF_SOFTMMU_MASK))
1546
                ret = 2;
1547
        } else {
1548
            void *map_addr;
1549

    
1550
            if (vaddr >= MMAP_AREA_END) {
1551
                ret = 2;
1552
            } else {
1553
                if (prot & PROT_WRITE) {
1554
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || 
1555
#if defined(TARGET_HAS_SMC) || 1
1556
                        first_tb ||
1557
#endif
1558
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && 
1559
                         !cpu_physical_memory_is_dirty(pd))) {
1560
                        /* ROM: we do as if code was inside */
1561
                        /* if code is present, we only map as read only and save the
1562
                           original mapping */
1563
                        VirtPageDesc *vp;
1564
                        
1565
                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
1566
                        vp->phys_addr = pd;
1567
                        vp->prot = prot;
1568
                        vp->valid_tag = virt_valid_tag;
1569
                        prot &= ~PAGE_WRITE;
1570
                    }
1571
                }
1572
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot, 
1573
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1574
                if (map_addr == MAP_FAILED) {
1575
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1576
                              paddr, vaddr);
1577
                }
1578
            }
1579
        }
1580
    }
1581
#endif
1582
    return ret;
1583
}
1584

    
1585
/* called from signal handler: invalidate the code and unprotect the
1586
   page. Return TRUE if the fault was successfully handled. */
1587
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
1588
{
1589
#if !defined(CONFIG_SOFTMMU)
1590
    VirtPageDesc *vp;
1591

    
1592
#if defined(DEBUG_TLB)
1593
    printf("page_unprotect: addr=0x%08x\n", addr);
1594
#endif
1595
    addr &= TARGET_PAGE_MASK;
1596

    
1597
    /* if it is not mapped, no need to worry here */
1598
    if (addr >= MMAP_AREA_END)
1599
        return 0;
1600
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1601
    if (!vp)
1602
        return 0;
1603
    /* NOTE: in this case, validate_tag is _not_ tested as it
1604
       validates only the code TLB */
1605
    if (vp->valid_tag != virt_valid_tag)
1606
        return 0;
1607
    if (!(vp->prot & PAGE_WRITE))
1608
        return 0;
1609
#if defined(DEBUG_TLB)
1610
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n", 
1611
           addr, vp->phys_addr, vp->prot);
1612
#endif
1613
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1614
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1615
                  (unsigned long)addr, vp->prot);
1616
    /* set the dirty bit */
1617
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
1618
    /* flush the code inside */
1619
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1620
    return 1;
1621
#else
1622
    return 0;
1623
#endif
1624
}
1625

    
1626
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr, 
                 target_phys_addr_t paddr, int prot, 
                 int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the page was write protected and becomes writable, we
           invalidate the translated code it contains */
        if (!(p->flags & PAGE_WRITE) && 
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

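/* Usage sketch (illustrative only, not one of this file's callers): a
   target mmap() emulation would typically mark a freshly mapped region
   with something like
       page_set_flags(start, start + len,
                      PAGE_VALID | PAGE_READ | PAGE_WRITE | PAGE_EXEC);
   where 'start' and 'len' are hypothetical page-aligned values supplied
   by the caller. */
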
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr, 
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    unsigned long addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}

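/* Usage sketch (illustrative, not called from this file): machine setup
   code could register a RAM region and a ROM region roughly as
       cpu_register_physical_memory(0x00000000, ram_size, IO_MEM_RAM);
       cpu_register_physical_memory(0x000f0000, 0x10000,
                                    rom_offset | IO_MEM_ROM);
   where 'ram_size' and 'rom_offset' are hypothetical, page-aligned
   values chosen by the caller. */
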
static uint32_t unassigned_mem_readb(target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

/* self modifying code support in soft mmu mode: writing to a page
   containing code comes to these functions */

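/* Overview (descriptive comment added for clarity): each write handler
   below first invalidates any translated code on the target page via
   tb_invalidate_phys_page_fast(), then performs the raw store and sets
   the corresponding bit in phys_ram_dirty. */
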
static void code_mem_writeb(target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 1);
#endif
    stb_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writew(target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2);
#endif
    stw_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writel(target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4);
#endif
    stl_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};

static void notdirty_mem_writeb(target_phys_addr_t addr, uint32_t val)
{
    stb_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(target_phys_addr_t addr, uint32_t val)
{
    stw_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(target_phys_addr_t addr, uint32_t val)
{
    stl_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    return io_index << IO_MEM_SHIFT;
}

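/* Usage sketch (illustrative, hypothetical device): a device model would
   typically supply one callback per access size and then map the
   returned token into the physical address space, e.g.

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       int iomem = cpu_register_io_memory(0, mydev_read, mydev_write);
       cpu_register_physical_memory(0xfe000000, 0x1000, iomem);

   The 'mydev_*' names and addresses are made up for illustration only. */
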
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, 
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, 
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_raw(buf);
                    io_mem_write[io_index][2](addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_raw(buf);
                    io_mem_write[io_index][1](addr, val);
                    l = 2;
                } else {
                    /* 8 bit access */
                    val = ldub_raw(buf);
                    io_mem_write[io_index][0](addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                /* set dirty bit */
                phys_ram_dirty[page >> TARGET_PAGE_BITS] = 1;
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](addr);
                    stl_raw(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](addr);
                    stw_raw(buf, val);
                    l = 2;
                } else {
                    /* 8 bit access */
                    val = io_mem_read[io_index][0](addr);
                    stb_raw(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + 
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#endif

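/* Usage sketch (illustrative): a device model performing DMA into guest
   memory could use, for example,
       cpu_physical_memory_rw(dma_dest_addr, tmp_buf, xfer_len, 1);
   to store 'xfer_len' bytes, and the same call with is_write == 0 to
   read them back; 'dma_dest_addr', 'tmp_buf' and 'xfer_len' are
   hypothetical names. */
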
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr, 
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK), 
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif