exec.c @ 1247c5f7

1
/*
2
 *  virtual page mapping and translated block handling
3
 * 
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include "config.h"
21
#ifdef _WIN32
22
#include <windows.h>
23
#else
24
#include <sys/types.h>
25
#include <sys/mman.h>
26
#endif
27
#include <stdlib.h>
28
#include <stdio.h>
29
#include <stdarg.h>
30
#include <string.h>
31
#include <errno.h>
32
#include <unistd.h>
33
#include <inttypes.h>
34

    
35
#include "cpu.h"
36
#include "exec-all.h"
37

    
38
//#define DEBUG_TB_INVALIDATE
39
//#define DEBUG_FLUSH
40
//#define DEBUG_TLB
41

    
42
/* make various TB consistency checks */
43
//#define DEBUG_TB_CHECK 
44
//#define DEBUG_TLB_CHECK 
45

    
46
/* threshold to flush the translated code buffer */
47
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
48

    
49
#define SMC_BITMAP_USE_THRESHOLD 10
50

    
51
#define MMAP_AREA_START        0x00000000
52
#define MMAP_AREA_END          0xa8000000
53

    
54
#if defined(TARGET_SPARC64)
55
#define TARGET_PHYS_ADDR_SPACE_BITS 41
56
#elif defined(TARGET_PPC64)
57
#define TARGET_PHYS_ADDR_SPACE_BITS 42
58
#else
59
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
60
#define TARGET_PHYS_ADDR_SPACE_BITS 32
61
#endif
62

    
63
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
64
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
65
int nb_tbs;
66
/* any access to the tbs or the page table must use this lock */
67
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
68

    
69
uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
70
uint8_t *code_gen_ptr;
71

    
72
int phys_ram_size;
73
int phys_ram_fd;
74
uint8_t *phys_ram_base;
75
uint8_t *phys_ram_dirty;
76

    
77
CPUState *first_cpu;
78
/* current CPU in the current thread. It is only valid inside
79
   cpu_exec() */
80
CPUState *cpu_single_env; 
81

    
82
typedef struct PageDesc {
83
    /* list of TBs intersecting this ram page */
84
    TranslationBlock *first_tb;
85
    /* in order to optimize self modifying code, we count the number
86
       of lookups we do to a given page to use a bitmap */
87
    unsigned int code_write_count;
88
    uint8_t *code_bitmap;
89
#if defined(CONFIG_USER_ONLY)
90
    unsigned long flags;
91
#endif
92
} PageDesc;
93

    
94
typedef struct PhysPageDesc {
95
    /* offset in host memory of the page + io_index in the low 12 bits */
96
    uint32_t phys_offset;
97
} PhysPageDesc;
98

    
99
#define L2_BITS 10
100
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
101

    
102
#define L1_SIZE (1 << L1_BITS)
103
#define L2_SIZE (1 << L2_BITS)
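
/* Example (assuming 4 KB target pages, i.e. TARGET_PAGE_BITS == 12): a
   32-bit virtual address splits into a 10-bit L1 index, a 10-bit L2 index
   and a 12-bit page offset.  page_find_alloc() below works on the page
   index (address >> TARGET_PAGE_BITS):

       pd = l1_map[index >> L2_BITS] + (index & (L2_SIZE - 1));

   with the second-level PageDesc array allocated lazily on first use. */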
104

    
105
static void io_mem_init(void);
106

    
107
unsigned long qemu_real_host_page_size;
108
unsigned long qemu_host_page_bits;
109
unsigned long qemu_host_page_size;
110
unsigned long qemu_host_page_mask;
111

    
112
/* XXX: for system emulation, it could just be an array */
113
static PageDesc *l1_map[L1_SIZE];
114
PhysPageDesc **l1_phys_map;
115

    
116
/* io memory support */
117
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
118
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
119
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
120
static int io_mem_nb;
121

    
122
/* log support */
123
char *logfilename = "/tmp/qemu.log";
124
FILE *logfile;
125
int loglevel;
126

    
127
/* statistics */
128
static int tlb_flush_count;
129
static int tb_flush_count;
130
static int tb_phys_invalidate_count;
131

    
132
static void page_init(void)
133
{
134
    /* NOTE: we can always suppose that qemu_host_page_size >=
135
       TARGET_PAGE_SIZE */
136
#ifdef _WIN32
137
    {
138
        SYSTEM_INFO system_info;
139
        DWORD old_protect;
140
        
141
        GetSystemInfo(&system_info);
142
        qemu_real_host_page_size = system_info.dwPageSize;
143
        
144
        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
145
                       PAGE_EXECUTE_READWRITE, &old_protect);
146
    }
147
#else
148
    qemu_real_host_page_size = getpagesize();
149
    {
150
        unsigned long start, end;
151

    
152
        start = (unsigned long)code_gen_buffer;
153
        start &= ~(qemu_real_host_page_size - 1);
154
        
155
        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
156
        end += qemu_real_host_page_size - 1;
157
        end &= ~(qemu_real_host_page_size - 1);
158
        
159
        mprotect((void *)start, end - start, 
160
                 PROT_READ | PROT_WRITE | PROT_EXEC);
161
    }
162
#endif
163

    
164
    if (qemu_host_page_size == 0)
165
        qemu_host_page_size = qemu_real_host_page_size;
166
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
167
        qemu_host_page_size = TARGET_PAGE_SIZE;
168
    qemu_host_page_bits = 0;
169
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
170
        qemu_host_page_bits++;
171
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
172
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
173
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
174
}
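
/* Example: on a host with 8 KB pages running a target with 4 KB pages
   (and qemu_host_page_size not forced by the caller), page_init() ends up
   with qemu_host_page_size = 8192, qemu_host_page_bits = 13 and
   qemu_host_page_mask = ~0x1fff, so the mprotect()-based self-modifying
   code protection below always operates on whole host pages. */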
175

    
176
static inline PageDesc *page_find_alloc(unsigned int index)
177
{
178
    PageDesc **lp, *p;
179

    
180
    lp = &l1_map[index >> L2_BITS];
181
    p = *lp;
182
    if (!p) {
183
        /* allocate if not found */
184
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
185
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
186
        *lp = p;
187
    }
188
    return p + (index & (L2_SIZE - 1));
189
}
190

    
191
static inline PageDesc *page_find(unsigned int index)
192
{
193
    PageDesc *p;
194

    
195
    p = l1_map[index >> L2_BITS];
196
    if (!p)
197
        return 0;
198
    return p + (index & (L2_SIZE - 1));
199
}
200

    
201
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
202
{
203
    void **lp, **p;
204

    
205
    p = (void **)l1_phys_map;
206
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
207

    
208
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
209
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
210
#endif
211
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
212
    p = *lp;
213
    if (!p) {
214
        /* allocate if not found */
215
        if (!alloc)
216
            return NULL;
217
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
218
        memset(p, 0, sizeof(void *) * L1_SIZE);
219
        *lp = p;
220
    }
221
#endif
222
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
223
    p = *lp;
224
    if (!p) {
225
        /* allocate if not found */
226
        if (!alloc)
227
            return NULL;
228
        p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
229
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
230
        *lp = p;
231
    }
232
    return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1));
233
}
234

    
235
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
236
{
237
    return phys_page_find_alloc(index, 0);
238
}
239

    
240
#if !defined(CONFIG_USER_ONLY)
241
static void tlb_protect_code(ram_addr_t ram_addr);
242
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr, 
243
                                    target_ulong vaddr);
244
#endif
245

    
246
void cpu_exec_init(CPUState *env)
247
{
248
    CPUState **penv;
249
    int cpu_index;
250

    
251
    if (!code_gen_ptr) {
252
        code_gen_ptr = code_gen_buffer;
253
        page_init();
254
        io_mem_init();
255
    }
256
    env->next_cpu = NULL;
257
    penv = &first_cpu;
258
    cpu_index = 0;
259
    while (*penv != NULL) {
260
        penv = (CPUState **)&(*penv)->next_cpu;
261
        cpu_index++;
262
    }
263
    env->cpu_index = cpu_index;
264
    *penv = env;
265
}
266

    
267
static inline void invalidate_page_bitmap(PageDesc *p)
268
{
269
    if (p->code_bitmap) {
270
        qemu_free(p->code_bitmap);
271
        p->code_bitmap = NULL;
272
    }
273
    p->code_write_count = 0;
274
}
275

    
276
/* set to NULL all the 'first_tb' fields in all PageDescs */
277
static void page_flush_tb(void)
278
{
279
    int i, j;
280
    PageDesc *p;
281

    
282
    for(i = 0; i < L1_SIZE; i++) {
283
        p = l1_map[i];
284
        if (p) {
285
            for(j = 0; j < L2_SIZE; j++) {
286
                p->first_tb = NULL;
287
                invalidate_page_bitmap(p);
288
                p++;
289
            }
290
        }
291
    }
292
}
293

    
294
/* flush all the translation blocks */
295
/* XXX: tb_flush is currently not thread safe */
296
void tb_flush(CPUState *env1)
297
{
298
    CPUState *env;
299
#if defined(DEBUG_FLUSH)
300
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n", 
301
           code_gen_ptr - code_gen_buffer, 
302
           nb_tbs, 
303
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
304
#endif
305
    nb_tbs = 0;
306
    
307
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
308
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
309
    }
310

    
311
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
312
    page_flush_tb();
313

    
314
    code_gen_ptr = code_gen_buffer;
315
    /* XXX: flush processor icache at this point if cache flush is
316
       expensive */
317
    tb_flush_count++;
318
}
319

    
320
#ifdef DEBUG_TB_CHECK
321

    
322
static void tb_invalidate_check(unsigned long address)
323
{
324
    TranslationBlock *tb;
325
    int i;
326
    address &= TARGET_PAGE_MASK;
327
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
328
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
329
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
330
                  address >= tb->pc + tb->size)) {
331
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
332
                       address, tb->pc, tb->size);
333
            }
334
        }
335
    }
336
}
337

    
338
/* verify that all the pages have correct rights for code */
339
static void tb_page_check(void)
340
{
341
    TranslationBlock *tb;
342
    int i, flags1, flags2;
343
    
344
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
345
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
346
            flags1 = page_get_flags(tb->pc);
347
            flags2 = page_get_flags(tb->pc + tb->size - 1);
348
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
349
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
350
                       tb->pc, tb->size, flags1, flags2);
351
            }
352
        }
353
    }
354
}
355

    
356
void tb_jmp_check(TranslationBlock *tb)
357
{
358
    TranslationBlock *tb1;
359
    unsigned int n1;
360

    
361
    /* walk the list of incoming jumps and check that it ends back at 'tb' */
362
    tb1 = tb->jmp_first;
363
    for(;;) {
364
        n1 = (long)tb1 & 3;
365
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
366
        if (n1 == 2)
367
            break;
368
        tb1 = tb1->jmp_next[n1];
369
    }
370
    /* check end of list */
371
    if (tb1 != tb) {
372
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
373
    }
374
}
375

    
376
#endif
377

    
378
/* invalidate one TB */
379
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
380
                             int next_offset)
381
{
382
    TranslationBlock *tb1;
383
    for(;;) {
384
        tb1 = *ptb;
385
        if (tb1 == tb) {
386
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
387
            break;
388
        }
389
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
390
    }
391
}
392

    
393
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
394
{
395
    TranslationBlock *tb1;
396
    unsigned int n1;
397

    
398
    for(;;) {
399
        tb1 = *ptb;
400
        n1 = (long)tb1 & 3;
401
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
402
        if (tb1 == tb) {
403
            *ptb = tb1->page_next[n1];
404
            break;
405
        }
406
        ptb = &tb1->page_next[n1];
407
    }
408
}
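
/* Note on the '(long)tb & 3' idiom used in these list walkers: the
   pointers stored in PageDesc.first_tb, tb->page_next[] and tb->jmp_first
   carry a small tag in their two low bits (TranslationBlock pointers are
   at least 4-byte aligned).  In the per-page lists the tag is 0 or 1 and
   selects which of the TB's two page links continues the chain; in the
   jump lists the value 2 marks the list head, which is why the loops stop
   when n1 == 2. */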
409

    
410
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
411
{
412
    TranslationBlock *tb1, **ptb;
413
    unsigned int n1;
414

    
415
    ptb = &tb->jmp_next[n];
416
    tb1 = *ptb;
417
    if (tb1) {
418
        /* find tb(n) in circular list */
419
        for(;;) {
420
            tb1 = *ptb;
421
            n1 = (long)tb1 & 3;
422
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
423
            if (n1 == n && tb1 == tb)
424
                break;
425
            if (n1 == 2) {
426
                ptb = &tb1->jmp_first;
427
            } else {
428
                ptb = &tb1->jmp_next[n1];
429
            }
430
        }
431
        /* now we can suppress tb(n) from the list */
432
        *ptb = tb->jmp_next[n];
433

    
434
        tb->jmp_next[n] = NULL;
435
    }
436
}
437

    
438
/* reset the jump entry 'n' of a TB so that it is not chained to
439
   another TB */
440
static inline void tb_reset_jump(TranslationBlock *tb, int n)
441
{
442
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
443
}
444

    
445
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
446
{
447
    CPUState *env;
448
    PageDesc *p;
449
    unsigned int h, n1;
450
    target_ulong phys_pc;
451
    TranslationBlock *tb1, *tb2;
452
    
453
    /* remove the TB from the hash list */
454
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
455
    h = tb_phys_hash_func(phys_pc);
456
    tb_remove(&tb_phys_hash[h], tb, 
457
              offsetof(TranslationBlock, phys_hash_next));
458

    
459
    /* remove the TB from the page list */
460
    if (tb->page_addr[0] != page_addr) {
461
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
462
        tb_page_remove(&p->first_tb, tb);
463
        invalidate_page_bitmap(p);
464
    }
465
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
466
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
467
        tb_page_remove(&p->first_tb, tb);
468
        invalidate_page_bitmap(p);
469
    }
470

    
471
    tb_invalidated_flag = 1;
472

    
473
    /* remove the TB from the hash list */
474
    h = tb_jmp_cache_hash_func(tb->pc);
475
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
476
        if (env->tb_jmp_cache[h] == tb)
477
            env->tb_jmp_cache[h] = NULL;
478
    }
479

    
480
    /* suppress this TB from the two jump lists */
481
    tb_jmp_remove(tb, 0);
482
    tb_jmp_remove(tb, 1);
483

    
484
    /* suppress any remaining jumps to this TB */
485
    tb1 = tb->jmp_first;
486
    for(;;) {
487
        n1 = (long)tb1 & 3;
488
        if (n1 == 2)
489
            break;
490
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
491
        tb2 = tb1->jmp_next[n1];
492
        tb_reset_jump(tb1, n1);
493
        tb1->jmp_next[n1] = NULL;
494
        tb1 = tb2;
495
    }
496
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
497

    
498
    tb_phys_invalidate_count++;
499
}
500

    
501
static inline void set_bits(uint8_t *tab, int start, int len)
502
{
503
    int end, mask, end1;
504

    
505
    end = start + len;
506
    tab += start >> 3;
507
    mask = 0xff << (start & 7);
508
    if ((start & ~7) == (end & ~7)) {
509
        if (start < end) {
510
            mask &= ~(0xff << (end & 7));
511
            *tab |= mask;
512
        }
513
    } else {
514
        *tab++ |= mask;
515
        start = (start + 8) & ~7;
516
        end1 = end & ~7;
517
        while (start < end1) {
518
            *tab++ = 0xff;
519
            start += 8;
520
        }
521
        if (start < end) {
522
            mask = ~(0xff << (end & 7));
523
            *tab |= mask;
524
        }
525
    }
526
}
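
/* Worked example: set_bits(tab, 5, 10) marks bits 5..14, i.e. it ORs 0xe0
   into tab[0] (bits 5-7), writes no full 0xff bytes, and ORs 0x7f into
   tab[1] (bits 8-14). */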
527

    
528
static void build_page_bitmap(PageDesc *p)
529
{
530
    int n, tb_start, tb_end;
531
    TranslationBlock *tb;
532
    
533
    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
534
    if (!p->code_bitmap)
535
        return;
536
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
537

    
538
    tb = p->first_tb;
539
    while (tb != NULL) {
540
        n = (long)tb & 3;
541
        tb = (TranslationBlock *)((long)tb & ~3);
542
        /* NOTE: this is subtle as a TB may span two physical pages */
543
        if (n == 0) {
544
            /* NOTE: tb_end may be after the end of the page, but
545
               it is not a problem */
546
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
547
            tb_end = tb_start + tb->size;
548
            if (tb_end > TARGET_PAGE_SIZE)
549
                tb_end = TARGET_PAGE_SIZE;
550
        } else {
551
            tb_start = 0;
552
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
553
        }
554
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
555
        tb = tb->page_next[n];
556
    }
557
}
558

    
559
#ifdef TARGET_HAS_PRECISE_SMC
560

    
561
static void tb_gen_code(CPUState *env, 
562
                        target_ulong pc, target_ulong cs_base, int flags,
563
                        int cflags)
564
{
565
    TranslationBlock *tb;
566
    uint8_t *tc_ptr;
567
    target_ulong phys_pc, phys_page2, virt_page2;
568
    int code_gen_size;
569

    
570
    phys_pc = get_phys_addr_code(env, pc);
571
    tb = tb_alloc(pc);
572
    if (!tb) {
573
        /* flush must be done */
574
        tb_flush(env);
575
        /* cannot fail at this point */
576
        tb = tb_alloc(pc);
577
    }
578
    tc_ptr = code_gen_ptr;
579
    tb->tc_ptr = tc_ptr;
580
    tb->cs_base = cs_base;
581
    tb->flags = flags;
582
    tb->cflags = cflags;
583
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
584
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
585
    
586
    /* check next page if needed */
587
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
588
    phys_page2 = -1;
589
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
590
        phys_page2 = get_phys_addr_code(env, virt_page2);
591
    }
592
    tb_link_phys(tb, phys_pc, phys_page2);
593
}
594
#endif
595
    
596
/* invalidate all TBs which intersect with the target physical page
597
   starting in range [start, end[. NOTE: start and end must refer to
598
   the same physical page. 'is_cpu_write_access' should be true if called
599
   from a real cpu write access: the virtual CPU will exit the current
600
   TB if code is modified inside this TB. */
601
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end, 
602
                                   int is_cpu_write_access)
603
{
604
    int n, current_tb_modified, current_tb_not_found, current_flags;
605
    CPUState *env = cpu_single_env;
606
    PageDesc *p;
607
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
608
    target_ulong tb_start, tb_end;
609
    target_ulong current_pc, current_cs_base;
610

    
611
    p = page_find(start >> TARGET_PAGE_BITS);
612
    if (!p) 
613
        return;
614
    if (!p->code_bitmap && 
615
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
616
        is_cpu_write_access) {
617
        /* build code bitmap */
618
        build_page_bitmap(p);
619
    }
620

    
621
    /* we remove all the TBs in the range [start, end[ */
622
    /* XXX: see if in some cases it could be faster to invalidate all the code */
623
    current_tb_not_found = is_cpu_write_access;
624
    current_tb_modified = 0;
625
    current_tb = NULL; /* avoid warning */
626
    current_pc = 0; /* avoid warning */
627
    current_cs_base = 0; /* avoid warning */
628
    current_flags = 0; /* avoid warning */
629
    tb = p->first_tb;
630
    while (tb != NULL) {
631
        n = (long)tb & 3;
632
        tb = (TranslationBlock *)((long)tb & ~3);
633
        tb_next = tb->page_next[n];
634
        /* NOTE: this is subtle as a TB may span two physical pages */
635
        if (n == 0) {
636
            /* NOTE: tb_end may be after the end of the page, but
637
               it is not a problem */
638
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
639
            tb_end = tb_start + tb->size;
640
        } else {
641
            tb_start = tb->page_addr[1];
642
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
643
        }
644
        if (!(tb_end <= start || tb_start >= end)) {
645
#ifdef TARGET_HAS_PRECISE_SMC
646
            if (current_tb_not_found) {
647
                current_tb_not_found = 0;
648
                current_tb = NULL;
649
                if (env->mem_write_pc) {
650
                    /* now we have a real cpu fault */
651
                    current_tb = tb_find_pc(env->mem_write_pc);
652
                }
653
            }
654
            if (current_tb == tb &&
655
                !(current_tb->cflags & CF_SINGLE_INSN)) {
656
                /* If we are modifying the current TB, we must stop
657
                its execution. We could be more precise by checking
658
                that the modification is after the current PC, but it
659
                would require a specialized function to partially
660
                restore the CPU state */
661
                
662
                current_tb_modified = 1;
663
                cpu_restore_state(current_tb, env, 
664
                                  env->mem_write_pc, NULL);
665
#if defined(TARGET_I386)
666
                current_flags = env->hflags;
667
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
668
                current_cs_base = (target_ulong)env->segs[R_CS].base;
669
                current_pc = current_cs_base + env->eip;
670
#else
671
#error unsupported CPU
672
#endif
673
            }
674
#endif /* TARGET_HAS_PRECISE_SMC */
675
            /* we need to do that to handle the case where a signal
676
               occurs while doing tb_phys_invalidate() */
677
            saved_tb = NULL;
678
            if (env) {
679
                saved_tb = env->current_tb;
680
                env->current_tb = NULL;
681
            }
682
            tb_phys_invalidate(tb, -1);
683
            if (env) {
684
                env->current_tb = saved_tb;
685
                if (env->interrupt_request && env->current_tb)
686
                    cpu_interrupt(env, env->interrupt_request);
687
            }
688
        }
689
        tb = tb_next;
690
    }
691
#if !defined(CONFIG_USER_ONLY)
692
    /* if no code remaining, no need to continue to use slow writes */
693
    if (!p->first_tb) {
694
        invalidate_page_bitmap(p);
695
        if (is_cpu_write_access) {
696
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
697
        }
698
    }
699
#endif
700
#ifdef TARGET_HAS_PRECISE_SMC
701
    if (current_tb_modified) {
702
        /* we generate a block containing just the instruction
703
           modifying the memory. It will ensure that it cannot modify
704
           itself */
705
        env->current_tb = NULL;
706
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 
707
                    CF_SINGLE_INSN);
708
        cpu_resume_from_signal(env, NULL);
709
    }
710
#endif
711
}
712

    
713
/* len must be <= 8 and start must be a multiple of len */
714
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
715
{
716
    PageDesc *p;
717
    int offset, b;
718
#if 0
719
    if (1) {
720
        if (loglevel) {
721
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n", 
722
                   cpu_single_env->mem_write_vaddr, len, 
723
                   cpu_single_env->eip, 
724
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
725
        }
726
    }
727
#endif
728
    p = page_find(start >> TARGET_PAGE_BITS);
729
    if (!p) 
730
        return;
731
    if (p->code_bitmap) {
732
        offset = start & ~TARGET_PAGE_MASK;
733
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
734
        if (b & ((1 << len) - 1))
735
            goto do_invalidate;
736
    } else {
737
    do_invalidate:
738
        tb_invalidate_phys_page_range(start, start + len, 1);
739
    }
740
}
741

    
742
#if !defined(CONFIG_SOFTMMU)
743
static void tb_invalidate_phys_page(target_ulong addr, 
744
                                    unsigned long pc, void *puc)
745
{
746
    int n, current_flags, current_tb_modified;
747
    target_ulong current_pc, current_cs_base;
748
    PageDesc *p;
749
    TranslationBlock *tb, *current_tb;
750
#ifdef TARGET_HAS_PRECISE_SMC
751
    CPUState *env = cpu_single_env;
752
#endif
753

    
754
    addr &= TARGET_PAGE_MASK;
755
    p = page_find(addr >> TARGET_PAGE_BITS);
756
    if (!p) 
757
        return;
758
    tb = p->first_tb;
759
    current_tb_modified = 0;
760
    current_tb = NULL;
761
    current_pc = 0; /* avoid warning */
762
    current_cs_base = 0; /* avoid warning */
763
    current_flags = 0; /* avoid warning */
764
#ifdef TARGET_HAS_PRECISE_SMC
765
    if (tb && pc != 0) {
766
        current_tb = tb_find_pc(pc);
767
    }
768
#endif
769
    while (tb != NULL) {
770
        n = (long)tb & 3;
771
        tb = (TranslationBlock *)((long)tb & ~3);
772
#ifdef TARGET_HAS_PRECISE_SMC
773
        if (current_tb == tb &&
774
            !(current_tb->cflags & CF_SINGLE_INSN)) {
775
                /* If we are modifying the current TB, we must stop
776
                   its execution. We could be more precise by checking
777
                   that the modification is after the current PC, but it
778
                   would require a specialized function to partially
779
                   restore the CPU state */
780
            
781
            current_tb_modified = 1;
782
            cpu_restore_state(current_tb, env, pc, puc);
783
#if defined(TARGET_I386)
784
            current_flags = env->hflags;
785
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
786
            current_cs_base = (target_ulong)env->segs[R_CS].base;
787
            current_pc = current_cs_base + env->eip;
788
#else
789
#error unsupported CPU
790
#endif
791
        }
792
#endif /* TARGET_HAS_PRECISE_SMC */
793
        tb_phys_invalidate(tb, addr);
794
        tb = tb->page_next[n];
795
    }
796
    p->first_tb = NULL;
797
#ifdef TARGET_HAS_PRECISE_SMC
798
    if (current_tb_modified) {
799
        /* we generate a block containing just the instruction
800
           modifying the memory. It will ensure that it cannot modify
801
           itself */
802
        env->current_tb = NULL;
803
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 
804
                    CF_SINGLE_INSN);
805
        cpu_resume_from_signal(env, puc);
806
    }
807
#endif
808
}
809
#endif
810

    
811
/* add the tb in the target page and protect it if necessary */
812
static inline void tb_alloc_page(TranslationBlock *tb, 
813
                                 unsigned int n, unsigned int page_addr)
814
{
815
    PageDesc *p;
816
    TranslationBlock *last_first_tb;
817

    
818
    tb->page_addr[n] = page_addr;
819
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
820
    tb->page_next[n] = p->first_tb;
821
    last_first_tb = p->first_tb;
822
    p->first_tb = (TranslationBlock *)((long)tb | n);
823
    invalidate_page_bitmap(p);
824

    
825
#if defined(TARGET_HAS_SMC) || 1
826

    
827
#if defined(CONFIG_USER_ONLY)
828
    if (p->flags & PAGE_WRITE) {
829
        unsigned long host_start, host_end, addr;
830
        int prot;
831

    
832
        /* force the host page as non writable (writes will have a
833
           page fault + mprotect overhead) */
834
        host_start = page_addr & qemu_host_page_mask;
835
        host_end = host_start + qemu_host_page_size;
836
        prot = 0;
837
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
838
            prot |= page_get_flags(addr);
839
        mprotect((void *)host_start, qemu_host_page_size, 
840
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
841
#ifdef DEBUG_TB_INVALIDATE
842
        printf("protecting code page: 0x%08lx\n", 
843
               host_start);
844
#endif
845
        p->flags &= ~PAGE_WRITE;
846
    }
847
#else
848
    /* if some code is already present, then the pages are already
849
       protected. So we handle the case where only the first TB is
850
       allocated in a physical page */
851
    if (!last_first_tb) {
852
        tlb_protect_code(page_addr);
853
    }
854
#endif
855

    
856
#endif /* TARGET_HAS_SMC */
857
}
858

    
859
/* Allocate a new translation block. Flush the translation buffer if
860
   too many translation blocks or too much generated code. */
861
TranslationBlock *tb_alloc(target_ulong pc)
862
{
863
    TranslationBlock *tb;
864

    
865
    if (nb_tbs >= CODE_GEN_MAX_BLOCKS || 
866
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
867
        return NULL;
868
    tb = &tbs[nb_tbs++];
869
    tb->pc = pc;
870
    tb->cflags = 0;
871
    return tb;
872
}
873

    
874
/* add a new TB and link it to the physical page tables. phys_page2 is
875
   (-1) to indicate that only one page contains the TB. */
876
void tb_link_phys(TranslationBlock *tb, 
877
                  target_ulong phys_pc, target_ulong phys_page2)
878
{
879
    unsigned int h;
880
    TranslationBlock **ptb;
881

    
882
    /* add in the physical hash table */
883
    h = tb_phys_hash_func(phys_pc);
884
    ptb = &tb_phys_hash[h];
885
    tb->phys_hash_next = *ptb;
886
    *ptb = tb;
887

    
888
    /* add in the page list */
889
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
890
    if (phys_page2 != -1)
891
        tb_alloc_page(tb, 1, phys_page2);
892
    else
893
        tb->page_addr[1] = -1;
894

    
895
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
896
    tb->jmp_next[0] = NULL;
897
    tb->jmp_next[1] = NULL;
898
#ifdef USE_CODE_COPY
899
    tb->cflags &= ~CF_FP_USED;
900
    if (tb->cflags & CF_TB_FP_USED)
901
        tb->cflags |= CF_FP_USED;
902
#endif
903

    
904
    /* init original jump addresses */
905
    if (tb->tb_next_offset[0] != 0xffff)
906
        tb_reset_jump(tb, 0);
907
    if (tb->tb_next_offset[1] != 0xffff)
908
        tb_reset_jump(tb, 1);
909

    
910
#ifdef DEBUG_TB_CHECK
911
    tb_page_check();
912
#endif
913
}
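
/* A TB whose code crosses a target page boundary is linked into the page
   lists of both pages (page_addr[1] != -1), so a write to either page
   invalidates it.  The phys hash chain itself is keyed on the physical
   address of the first instruction, which is what tb_phys_invalidate()
   recomputes from page_addr[0]. */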
914

    
915
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
916
   tb[1].tc_ptr. Return NULL if not found */
917
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
918
{
919
    int m_min, m_max, m;
920
    unsigned long v;
921
    TranslationBlock *tb;
922

    
923
    if (nb_tbs <= 0)
924
        return NULL;
925
    if (tc_ptr < (unsigned long)code_gen_buffer ||
926
        tc_ptr >= (unsigned long)code_gen_ptr)
927
        return NULL;
928
    /* binary search (cf Knuth) */
929
    m_min = 0;
930
    m_max = nb_tbs - 1;
931
    while (m_min <= m_max) {
932
        m = (m_min + m_max) >> 1;
933
        tb = &tbs[m];
934
        v = (unsigned long)tb->tc_ptr;
935
        if (v == tc_ptr)
936
            return tb;
937
        else if (tc_ptr < v) {
938
            m_max = m - 1;
939
        } else {
940
            m_min = m + 1;
941
        }
942
    } 
943
    return &tbs[m_max];
944
}
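
/* The binary search is valid because TBs are handed out from tbs[] in
   allocation order and their code is emitted at monotonically increasing
   code_gen_ptr positions, so tbs[0..nb_tbs-1] stays sorted by tc_ptr
   until the next tb_flush(). */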
945

    
946
static void tb_reset_jump_recursive(TranslationBlock *tb);
947

    
948
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
949
{
950
    TranslationBlock *tb1, *tb_next, **ptb;
951
    unsigned int n1;
952

    
953
    tb1 = tb->jmp_next[n];
954
    if (tb1 != NULL) {
955
        /* find head of list */
956
        for(;;) {
957
            n1 = (long)tb1 & 3;
958
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
959
            if (n1 == 2)
960
                break;
961
            tb1 = tb1->jmp_next[n1];
962
        }
963
        /* we are now sure that tb jumps to tb1 */
964
        tb_next = tb1;
965

    
966
        /* remove tb from the jmp_first list */
967
        ptb = &tb_next->jmp_first;
968
        for(;;) {
969
            tb1 = *ptb;
970
            n1 = (long)tb1 & 3;
971
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
972
            if (n1 == n && tb1 == tb)
973
                break;
974
            ptb = &tb1->jmp_next[n1];
975
        }
976
        *ptb = tb->jmp_next[n];
977
        tb->jmp_next[n] = NULL;
978
        
979
        /* suppress the jump to next tb in generated code */
980
        tb_reset_jump(tb, n);
981

    
982
        /* suppress jumps in the tb on which we could have jumped */
983
        tb_reset_jump_recursive(tb_next);
984
    }
985
}
986

    
987
static void tb_reset_jump_recursive(TranslationBlock *tb)
988
{
989
    tb_reset_jump_recursive2(tb, 0);
990
    tb_reset_jump_recursive2(tb, 1);
991
}
992

    
993
#if defined(TARGET_HAS_ICE)
994
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
995
{
996
    target_ulong phys_addr;
997

    
998
    phys_addr = cpu_get_phys_page_debug(env, pc);
999
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
1000
}
1001
#endif
1002

    
1003
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1004
   breakpoint is reached */
1005
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1006
{
1007
#if defined(TARGET_HAS_ICE)
1008
    int i;
1009
    
1010
    for(i = 0; i < env->nb_breakpoints; i++) {
1011
        if (env->breakpoints[i] == pc)
1012
            return 0;
1013
    }
1014

    
1015
    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1016
        return -1;
1017
    env->breakpoints[env->nb_breakpoints++] = pc;
1018
    
1019
    breakpoint_invalidate(env, pc);
1020
    return 0;
1021
#else
1022
    return -1;
1023
#endif
1024
}
1025

    
1026
/* remove a breakpoint */
1027
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1028
{
1029
#if defined(TARGET_HAS_ICE)
1030
    int i;
1031
    for(i = 0; i < env->nb_breakpoints; i++) {
1032
        if (env->breakpoints[i] == pc)
1033
            goto found;
1034
    }
1035
    return -1;
1036
 found:
1037
    env->nb_breakpoints--;
1038
    if (i < env->nb_breakpoints)
1039
      env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1040

    
1041
    breakpoint_invalidate(env, pc);
1042
    return 0;
1043
#else
1044
    return -1;
1045
#endif
1046
}
1047

    
1048
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1049
   CPU loop after each instruction */
1050
void cpu_single_step(CPUState *env, int enabled)
1051
{
1052
#if defined(TARGET_HAS_ICE)
1053
    if (env->singlestep_enabled != enabled) {
1054
        env->singlestep_enabled = enabled;
1055
        /* must flush all the translated code to avoid inconsistencies */
1056
        /* XXX: only flush what is necessary */
1057
        tb_flush(env);
1058
    }
1059
#endif
1060
}
1061

    
1062
/* enable or disable low-level logging */
1063
void cpu_set_log(int log_flags)
1064
{
1065
    loglevel = log_flags;
1066
    if (loglevel && !logfile) {
1067
        logfile = fopen(logfilename, "w");
1068
        if (!logfile) {
1069
            perror(logfilename);
1070
            _exit(1);
1071
        }
1072
#if !defined(CONFIG_SOFTMMU)
1073
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1074
        {
1075
            static uint8_t logfile_buf[4096];
1076
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1077
        }
1078
#else
1079
        setvbuf(logfile, NULL, _IOLBF, 0);
1080
#endif
1081
    }
1082
}
1083

    
1084
void cpu_set_log_filename(const char *filename)
1085
{
1086
    logfilename = strdup(filename);
1087
}
1088

    
1089
/* mask must never be zero, except for A20 change call */
1090
void cpu_interrupt(CPUState *env, int mask)
1091
{
1092
    TranslationBlock *tb;
1093
    static int interrupt_lock;
1094

    
1095
    env->interrupt_request |= mask;
1096
    /* if the cpu is currently executing code, we must unlink it and
1097
       all the potentially executing TB */
1098
    tb = env->current_tb;
1099
    if (tb && !testandset(&interrupt_lock)) {
1100
        env->current_tb = NULL;
1101
        tb_reset_jump_recursive(tb);
1102
        interrupt_lock = 0;
1103
    }
1104
}
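
/* Unlinking the chained jumps of the TB being executed forces the
   generated code back into the main execution loop at the next block
   boundary, where interrupt_request is examined; interrupt_lock only
   prevents two simultaneous callers from walking the same jump chains. */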
1105

    
1106
void cpu_reset_interrupt(CPUState *env, int mask)
1107
{
1108
    env->interrupt_request &= ~mask;
1109
}
1110

    
1111
CPULogItem cpu_log_items[] = {
1112
    { CPU_LOG_TB_OUT_ASM, "out_asm", 
1113
      "show generated host assembly code for each compiled TB" },
1114
    { CPU_LOG_TB_IN_ASM, "in_asm",
1115
      "show target assembly code for each compiled TB" },
1116
    { CPU_LOG_TB_OP, "op", 
1117
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1118
#ifdef TARGET_I386
1119
    { CPU_LOG_TB_OP_OPT, "op_opt",
1120
      "show micro ops after optimization for each compiled TB" },
1121
#endif
1122
    { CPU_LOG_INT, "int",
1123
      "show interrupts/exceptions in short format" },
1124
    { CPU_LOG_EXEC, "exec",
1125
      "show trace before each executed TB (lots of logs)" },
1126
    { CPU_LOG_TB_CPU, "cpu",
1127
      "show CPU state before bloc translation" },
1128
#ifdef TARGET_I386
1129
    { CPU_LOG_PCALL, "pcall",
1130
      "show protected mode far calls/returns/exceptions" },
1131
#endif
1132
#ifdef DEBUG_IOPORT
1133
    { CPU_LOG_IOPORT, "ioport",
1134
      "show all i/o ports accesses" },
1135
#endif
1136
    { 0, NULL, NULL },
1137
};
1138

    
1139
static int cmp1(const char *s1, int n, const char *s2)
1140
{
1141
    if (strlen(s2) != n)
1142
        return 0;
1143
    return memcmp(s1, s2, n) == 0;
1144
}
1145
      
1146
/* takes a comma-separated list of log masks. Returns 0 on error. */
1147
int cpu_str_to_log_mask(const char *str)
1148
{
1149
    CPULogItem *item;
1150
    int mask;
1151
    const char *p, *p1;
1152

    
1153
    p = str;
1154
    mask = 0;
1155
    for(;;) {
1156
        p1 = strchr(p, ',');
1157
        if (!p1)
1158
            p1 = p + strlen(p);
1159
        if(cmp1(p,p1-p,"all")) {
1160
                for(item = cpu_log_items; item->mask != 0; item++) {
1161
                        mask |= item->mask;
1162
                }
1163
        } else {
1164
        for(item = cpu_log_items; item->mask != 0; item++) {
1165
            if (cmp1(p, p1 - p, item->name))
1166
                goto found;
1167
        }
1168
        return 0;
1169
        }
1170
    found:
1171
        mask |= item->mask;
1172
        if (*p1 != ',')
1173
            break;
1174
        p = p1 + 1;
1175
    }
1176
    return mask;
1177
}
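
/* Example: cpu_str_to_log_mask("in_asm,op") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, "all" ORs together every entry of
   cpu_log_items[], and any unknown name makes the whole call return 0. */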
1178

    
1179
void cpu_abort(CPUState *env, const char *fmt, ...)
1180
{
1181
    va_list ap;
1182

    
1183
    va_start(ap, fmt);
1184
    fprintf(stderr, "qemu: fatal: ");
1185
    vfprintf(stderr, fmt, ap);
1186
    fprintf(stderr, "\n");
1187
#ifdef TARGET_I386
1188
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1189
#else
1190
    cpu_dump_state(env, stderr, fprintf, 0);
1191
#endif
1192
    va_end(ap);
1193
    abort();
1194
}
1195

    
1196
#if !defined(CONFIG_USER_ONLY)
1197

    
1198
/* NOTE: if flush_global is true, also flush global entries (not
1199
   implemented yet) */
1200
void tlb_flush(CPUState *env, int flush_global)
1201
{
1202
    int i;
1203

    
1204
#if defined(DEBUG_TLB)
1205
    printf("tlb_flush:\n");
1206
#endif
1207
    /* must reset current TB so that interrupts cannot modify the
1208
       links while we are modifying them */
1209
    env->current_tb = NULL;
1210

    
1211
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1212
        env->tlb_table[0][i].addr_read = -1;
1213
        env->tlb_table[0][i].addr_write = -1;
1214
        env->tlb_table[0][i].addr_code = -1;
1215
        env->tlb_table[1][i].addr_read = -1;
1216
        env->tlb_table[1][i].addr_write = -1;
1217
        env->tlb_table[1][i].addr_code = -1;
1218
    }
1219

    
1220
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1221

    
1222
#if !defined(CONFIG_SOFTMMU)
1223
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1224
#endif
1225
#ifdef USE_KQEMU
1226
    if (env->kqemu_enabled) {
1227
        kqemu_flush(env, flush_global);
1228
    }
1229
#endif
1230
    tlb_flush_count++;
1231
}
1232

    
1233
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1234
{
1235
    if (addr == (tlb_entry->addr_read & 
1236
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1237
        addr == (tlb_entry->addr_write & 
1238
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1239
        addr == (tlb_entry->addr_code & 
1240
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1241
        tlb_entry->addr_read = -1;
1242
        tlb_entry->addr_write = -1;
1243
        tlb_entry->addr_code = -1;
1244
    }
1245
}
1246

    
1247
void tlb_flush_page(CPUState *env, target_ulong addr)
1248
{
1249
    int i;
1250
    TranslationBlock *tb;
1251

    
1252
#if defined(DEBUG_TLB)
1253
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1254
#endif
1255
    /* must reset current TB so that interrupts cannot modify the
1256
       links while we are modifying them */
1257
    env->current_tb = NULL;
1258

    
1259
    addr &= TARGET_PAGE_MASK;
1260
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1261
    tlb_flush_entry(&env->tlb_table[0][i], addr);
1262
    tlb_flush_entry(&env->tlb_table[1][i], addr);
1263

    
1264
    for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
1265
        tb = env->tb_jmp_cache[i];
1266
        if (tb && 
1267
            ((tb->pc & TARGET_PAGE_MASK) == addr ||
1268
             ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
1269
            env->tb_jmp_cache[i] = NULL;
1270
        }
1271
    }
1272

    
1273
#if !defined(CONFIG_SOFTMMU)
1274
    if (addr < MMAP_AREA_END)
1275
        munmap((void *)addr, TARGET_PAGE_SIZE);
1276
#endif
1277
#ifdef USE_KQEMU
1278
    if (env->kqemu_enabled) {
1279
        kqemu_flush_page(env, addr);
1280
    }
1281
#endif
1282
}
1283

    
1284
/* update the TLBs so that writes to code in the virtual page 'addr'
1285
   can be detected */
1286
static void tlb_protect_code(ram_addr_t ram_addr)
1287
{
1288
    cpu_physical_memory_reset_dirty(ram_addr, 
1289
                                    ram_addr + TARGET_PAGE_SIZE,
1290
                                    CODE_DIRTY_FLAG);
1291
}
1292

    
1293
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1294
   tested for self modifying code */
1295
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr, 
1296
                                    target_ulong vaddr)
1297
{
1298
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1299
}
1300

    
1301
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, 
1302
                                         unsigned long start, unsigned long length)
1303
{
1304
    unsigned long addr;
1305
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1306
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1307
        if ((addr - start) < length) {
1308
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1309
        }
1310
    }
1311
}
1312

    
1313
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1314
                                     int dirty_flags)
1315
{
1316
    CPUState *env;
1317
    unsigned long length, start1;
1318
    int i, mask, len;
1319
    uint8_t *p;
1320

    
1321
    start &= TARGET_PAGE_MASK;
1322
    end = TARGET_PAGE_ALIGN(end);
1323

    
1324
    length = end - start;
1325
    if (length == 0)
1326
        return;
1327
    len = length >> TARGET_PAGE_BITS;
1328
#ifdef USE_KQEMU
1329
    /* XXX: should not depend on cpu context */
1330
    env = first_cpu;
1331
    if (env->kqemu_enabled) {
1332
        ram_addr_t addr;
1333
        addr = start;
1334
        for(i = 0; i < len; i++) {
1335
            kqemu_set_notdirty(env, addr);
1336
            addr += TARGET_PAGE_SIZE;
1337
        }
1338
    }
1339
#endif
1340
    mask = ~dirty_flags;
1341
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1342
    for(i = 0; i < len; i++)
1343
        p[i] &= mask;
1344

    
1345
    /* we modify the TLB cache so that the dirty bit will be set again
1346
       when accessing the range */
1347
    start1 = start + (unsigned long)phys_ram_base;
1348
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1349
        for(i = 0; i < CPU_TLB_SIZE; i++)
1350
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1351
        for(i = 0; i < CPU_TLB_SIZE; i++)
1352
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1353
    }
1354

    
1355
#if !defined(CONFIG_SOFTMMU)
1356
    /* XXX: this is expensive */
1357
    {
1358
        VirtPageDesc *p;
1359
        int j;
1360
        target_ulong addr;
1361

    
1362
        for(i = 0; i < L1_SIZE; i++) {
1363
            p = l1_virt_map[i];
1364
            if (p) {
1365
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
1366
                for(j = 0; j < L2_SIZE; j++) {
1367
                    if (p->valid_tag == virt_valid_tag &&
1368
                        p->phys_addr >= start && p->phys_addr < end &&
1369
                        (p->prot & PROT_WRITE)) {
1370
                        if (addr < MMAP_AREA_END) {
1371
                            mprotect((void *)addr, TARGET_PAGE_SIZE, 
1372
                                     p->prot & ~PROT_WRITE);
1373
                        }
1374
                    }
1375
                    addr += TARGET_PAGE_SIZE;
1376
                    p++;
1377
                }
1378
            }
1379
        }
1380
    }
1381
#endif
1382
}
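
/* Dirty tracking works by downgrading TLB entries: once the dirty flags
   of a range are cleared, matching RAM write entries are retagged
   IO_MEM_NOTDIRTY so that the next guest store takes the slow I/O path,
   where the page can be marked dirty again and tlb_set_dirty() can
   restore the fast IO_MEM_RAM mapping. */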
1383

    
1384
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1385
{
1386
    ram_addr_t ram_addr;
1387

    
1388
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1389
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + 
1390
            tlb_entry->addend - (unsigned long)phys_ram_base;
1391
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1392
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1393
        }
1394
    }
1395
}
1396

    
1397
/* update the TLB according to the current state of the dirty bits */
1398
void cpu_tlb_update_dirty(CPUState *env)
1399
{
1400
    int i;
1401
    for(i = 0; i < CPU_TLB_SIZE; i++)
1402
        tlb_update_dirty(&env->tlb_table[0][i]);
1403
    for(i = 0; i < CPU_TLB_SIZE; i++)
1404
        tlb_update_dirty(&env->tlb_table[1][i]);
1405
}
1406

    
1407
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, 
1408
                                  unsigned long start)
1409
{
1410
    unsigned long addr;
1411
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1412
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1413
        if (addr == start) {
1414
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1415
        }
1416
    }
1417
}
1418

    
1419
/* update the TLB corresponding to virtual page vaddr and phys addr
1420
   addr so that it is no longer dirty */
1421
static inline void tlb_set_dirty(CPUState *env,
1422
                                 unsigned long addr, target_ulong vaddr)
1423
{
1424
    int i;
1425

    
1426
    addr &= TARGET_PAGE_MASK;
1427
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1428
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
1429
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
1430
}
1431

    
1432
/* add a new TLB entry. At most one entry for a given virtual address
1433
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1434
   (can only happen in non SOFTMMU mode for I/O pages or pages
1435
   conflicting with the host address space). */
1436
int tlb_set_page_exec(CPUState *env, target_ulong vaddr, 
1437
                      target_phys_addr_t paddr, int prot, 
1438
                      int is_user, int is_softmmu)
1439
{
1440
    PhysPageDesc *p;
1441
    unsigned long pd;
1442
    unsigned int index;
1443
    target_ulong address;
1444
    target_phys_addr_t addend;
1445
    int ret;
1446
    CPUTLBEntry *te;
1447

    
1448
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1449
    if (!p) {
1450
        pd = IO_MEM_UNASSIGNED;
1451
    } else {
1452
        pd = p->phys_offset;
1453
    }
1454
#if defined(DEBUG_TLB)
1455
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1456
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
1457
#endif
1458

    
1459
    ret = 0;
1460
#if !defined(CONFIG_SOFTMMU)
1461
    if (is_softmmu) 
1462
#endif
1463
    {
1464
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1465
            /* IO memory case */
1466
            address = vaddr | pd;
1467
            addend = paddr;
1468
        } else {
1469
            /* standard memory */
1470
            address = vaddr;
1471
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1472
        }
1473
        
1474
        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1475
        addend -= vaddr;
1476
        te = &env->tlb_table[is_user][index];
1477
        te->addend = addend;
1478
        if (prot & PAGE_READ) {
1479
            te->addr_read = address;
1480
        } else {
1481
            te->addr_read = -1;
1482
        }
1483
        if (prot & PAGE_EXEC) {
1484
            te->addr_code = address;
1485
        } else {
1486
            te->addr_code = -1;
1487
        }
1488
        if (prot & PAGE_WRITE) {
1489
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
1490
                /* ROM: access is ignored (same as unassigned) */
1491
                te->addr_write = vaddr | IO_MEM_ROM;
1492
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && 
1493
                       !cpu_physical_memory_is_dirty(pd)) {
1494
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1495
            } else {
1496
                te->addr_write = address;
1497
            }
1498
        } else {
1499
            te->addr_write = -1;
1500
        }
1501
    }
1502
#if !defined(CONFIG_SOFTMMU)
1503
    else {
1504
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1505
            /* IO access: no mapping is done as it will be handled by the
1506
               soft MMU */
1507
            if (!(env->hflags & HF_SOFTMMU_MASK))
1508
                ret = 2;
1509
        } else {
1510
            void *map_addr;
1511

    
1512
            if (vaddr >= MMAP_AREA_END) {
1513
                ret = 2;
1514
            } else {
1515
                if (prot & PROT_WRITE) {
1516
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || 
1517
#if defined(TARGET_HAS_SMC) || 1
1518
                        first_tb ||
1519
#endif
1520
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && 
1521
                         !cpu_physical_memory_is_dirty(pd))) {
1522
                        /* ROM: we do as if code was inside */
1523
                        /* if code is present, we only map as read only and save the
1524
                           original mapping */
1525
                        VirtPageDesc *vp;
1526
                        
1527
                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1528
                        vp->phys_addr = pd;
1529
                        vp->prot = prot;
1530
                        vp->valid_tag = virt_valid_tag;
1531
                        prot &= ~PAGE_WRITE;
1532
                    }
1533
                }
1534
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot, 
1535
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1536
                if (map_addr == MAP_FAILED) {
1537
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
1538
                              paddr, vaddr);
1539
                }
1540
            }
1541
        }
1542
    }
1543
#endif
1544
    return ret;
1545
}
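
/* For RAM pages the addend is chosen so that
       host address = guest virtual address + te->addend
   points into phys_ram_base[]; for I/O pages the low bits of
   addr_read/addr_write/addr_code carry the io_index instead, which the
   softmmu access helpers use to pick the handler in io_mem_read[] and
   io_mem_write[]. */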
1546

    
1547
/* called from signal handler: invalidate the code and unprotect the
1548
   page. Return TRUE if the fault was successfully handled. */
1549
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
1550
{
1551
#if !defined(CONFIG_SOFTMMU)
1552
    VirtPageDesc *vp;
1553

    
1554
#if defined(DEBUG_TLB)
1555
    printf("page_unprotect: addr=0x%08x\n", addr);
1556
#endif
1557
    addr &= TARGET_PAGE_MASK;
1558

    
1559
    /* if it is not mapped, no need to worry here */
1560
    if (addr >= MMAP_AREA_END)
1561
        return 0;
1562
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1563
    if (!vp)
1564
        return 0;
1565
    /* NOTE: in this case, validate_tag is _not_ tested as it
1566
       validates only the code TLB */
1567
    if (vp->valid_tag != virt_valid_tag)
1568
        return 0;
1569
    if (!(vp->prot & PAGE_WRITE))
1570
        return 0;
1571
#if defined(DEBUG_TLB)
1572
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n", 
1573
           addr, vp->phys_addr, vp->prot);
1574
#endif
1575
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1576
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1577
                  (unsigned long)addr, vp->prot);
1578
    /* set the dirty bit */
1579
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1580
    /* flush the code inside */
1581
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1582
    return 1;
1583
#else
1584
    return 0;
1585
#endif
1586
}
1587

    
1588
#else
1589

    
1590
void tlb_flush(CPUState *env, int flush_global)
1591
{
1592
}
1593

    
1594
void tlb_flush_page(CPUState *env, target_ulong addr)
1595
{
1596
}
1597

    
1598
int tlb_set_page_exec(CPUState *env, target_ulong vaddr, 
1599
                      target_phys_addr_t paddr, int prot, 
1600
                      int is_user, int is_softmmu)
1601
{
1602
    return 0;
1603
}
1604

    
1605
/* dump memory mappings */
1606
void page_dump(FILE *f)
1607
{
1608
    unsigned long start, end;
1609
    int i, j, prot, prot1;
1610
    PageDesc *p;
1611

    
1612
    fprintf(f, "%-8s %-8s %-8s %s\n",
1613
            "start", "end", "size", "prot");
1614
    start = -1;
1615
    end = -1;
1616
    prot = 0;
1617
    for(i = 0; i <= L1_SIZE; i++) {
1618
        if (i < L1_SIZE)
1619
            p = l1_map[i];
1620
        else
1621
            p = NULL;
1622
        for(j = 0;j < L2_SIZE; j++) {
1623
            if (!p)
1624
                prot1 = 0;
1625
            else
1626
                prot1 = p[j].flags;
1627
            if (prot1 != prot) {
1628
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1629
                if (start != -1) {
1630
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1631
                            start, end, end - start, 
1632
                            prot & PAGE_READ ? 'r' : '-',
1633
                            prot & PAGE_WRITE ? 'w' : '-',
1634
                            prot & PAGE_EXEC ? 'x' : '-');
1635
                }
1636
                if (prot1 != 0)
1637
                    start = end;
1638
                else
1639
                    start = -1;
1640
                prot = prot1;
1641
            }
1642
            if (!p)
1643
                break;
1644
        }
1645
    }
1646
}
1647

    
1648
int page_get_flags(unsigned long address)
1649
{
1650
    PageDesc *p;
1651

    
1652
    p = page_find(address >> TARGET_PAGE_BITS);
1653
    if (!p)
1654
        return 0;
1655
    return p->flags;
1656
}
1657

    
1658
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically based
   on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

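/* Example (illustrative only, not used by this file): a hypothetical target
   mmap() emulation might register a newly mapped guest region and query it
   later.  guest_addr and guest_len are assumed to come from the emulated
   syscall arguments:

       page_set_flags(guest_addr, guest_addr + guest_len,
                      PAGE_VALID | PAGE_READ | PAGE_WRITE);
       prot = page_get_flags(guest_addr);

   Because PAGE_WRITE was requested, PAGE_WRITE_ORG is recorded as well, so
   page_unprotect() can later restore write access after the page has been
   write-protected to catch self-modifying code. */
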
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

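/* Example (illustrative sketch): in user-mode emulation the host SIGSEGV
   handler is expected to call page_unprotect() when a write faults on a
   page that was write-protected to track self-modifying code.  fault_addr
   and fault_pc are assumed to be taken from the signal context:

       if (page_unprotect(fault_addr, fault_pc, puc))
           return;    the faulting write can simply be restarted

   otherwise the fault is a real guest protection error and must be
   forwarded to the guest. */
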
/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

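/* Example (illustrative only): an emulated read(2) that copies data straight
   into guest memory would first unprotect the destination range so that the
   host write does not fault on pages holding translated code.  fd, guest_buf
   and count are assumed to be the already-translated syscall arguments:

       page_unprotect_range(guest_buf, count);
       ret = read(fd, guest_buf, count);
*/
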
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}

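/* Example (illustrative sketch of a caller, not part of this file): a board
   model typically maps its RAM and devices at init time.  ram_size and
   mmio_index are assumed to be provided by the machine code, mmio_index
   coming from cpu_register_io_memory() further below; guest RAM is assumed
   to start at offset 0 of phys_ram_base:

       cpu_register_physical_memory(0x00000000, ram_size, IO_MEM_RAM);
       cpu_register_physical_memory(0x10000000, 0x1000, mmio_index);

   For RAM, phys_offset is the offset into phys_ram_base; for I/O pages the
   bits below TARGET_PAGE_MASK select the registered handler table. */
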
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

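/* Illustrative note (derived from the handlers above): phys_ram_dirty keeps
   one byte per RAM page.  A value of 0xff means the page is fully dirty and
   writes no longer need to be trapped; the notdirty handlers set every bit
   except CODE_DIRTY_FLAG, so a page keeps going through this slow path until
   the translated code it contains has been invalidated.  A hypothetical
   caller tests the fast-path condition with:

       if (cpu_physical_memory_is_dirty(ram_addr)) {
           plain RAM write, no code invalidation needed
       }
*/
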
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}

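/* Example (illustrative sketch, hypothetical device): a device model
   supplies one read and one write callback per access size, registers them,
   and then maps the returned handle at a physical address.  my_dev_readb/w/l,
   my_dev_writeb/w/l, the state pointer s and MY_DEV_BASE are assumptions of
   the example:

       static CPUReadMemoryFunc *my_dev_read[3] = {
           my_dev_readb, my_dev_readw, my_dev_readl,
       };
       static CPUWriteMemoryFunc *my_dev_write[3] = {
           my_dev_writeb, my_dev_writew, my_dev_writel,
       };

       int iomemtype = cpu_register_io_memory(0, my_dev_read, my_dev_write, s);
       cpu_register_physical_memory(MY_DEV_BASE, 0x1000, iomemtype);
*/
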
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* copy at most up to the end of the current page */
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

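/* Example (illustrative sketch, hypothetical device): a DMA-capable device
   model can use the cpu_physical_memory_read() and cpu_physical_memory_write()
   wrappers around this function to move a buffer to or from guest RAM.
   desc_addr and payload are assumptions of the example:

       uint8_t payload[512];
       cpu_physical_memory_read(desc_addr, payload, sizeof(payload));
       cpu_physical_memory_write(desc_addr, payload, sizeof(payload));

   The write path above also invalidates any translated code derived from the
   touched RAM, so devices do not need to handle that themselves. */
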
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

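/* Example (illustrative): the ld*_phys accessors are convenient when target
   code needs to peek at guest physical memory directly, e.g. a hypothetical
   page-table walk reading a 4-byte entry:

       uint32_t pte = ldl_phys(pt_base + (index << 2));

   pt_base and index are assumptions of the example; the access goes through
   the registered I/O handlers if the address maps to device memory. */
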
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

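/* Example (illustrative sketch): a target MMU helper that sets the
   accessed/dirty bits of a guest PTE can use stl_phys_notdirty() so that
   this bookkeeping write does not itself mark the RAM page dirty or
   invalidate translated code.  pte_addr, pte and PTE_DIRTY_BIT are
   assumptions of the example:

       stl_phys_notdirty(pte_addr, pte | PTE_DIRTY_BIT);
*/
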
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

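/* Example (illustrative): this is the entry point a debugger stub would use
   to read or write guest memory by virtual address, letting the function do
   the per-page translation through cpu_get_phys_page_debug().  env and
   watch_addr are assumptions of the example:

       uint32_t word;
       if (cpu_memory_rw_debug(env, watch_addr, (uint8_t *)&word,
                               sizeof(word), 0) == 0) {
           word now holds the guest memory contents
       }
*/
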
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count            %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
}

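/* Illustrative reading of the statistics above (hypothetical numbers): if
   1000 TBs totalling 20000 bytes of target code have been translated while
   code_gen_ptr - code_gen_buffer is 120000 bytes, the report would show an
   average target size of 20 bytes and an expansion ratio of 6.0, i.e. each
   byte of guest code cost about six bytes of generated host code. */
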
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif