/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
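
/* Determine the host page size, remap the static code generation
   buffer read/write/execute, and allocate the first level of the
   physical page table. Called once from cpu_exec_init(). */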
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}

    
184
static inline PageDesc *page_find_alloc(unsigned int index)
185
{
186
    PageDesc **lp, *p;
187

    
188
    lp = &l1_map[index >> L2_BITS];
189
    p = *lp;
190
    if (!p) {
191
        /* allocate if not found */
192
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
193
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
194
        *lp = p;
195
    }
196
    return p + (index & (L2_SIZE - 1));
197
}
198

    
199
static inline PageDesc *page_find(unsigned int index)
200
{
201
    PageDesc *p;
202

    
203
    p = l1_map[index >> L2_BITS];
204
    if (!p)
205
        return 0;
206
    return p + (index & (L2_SIZE - 1));
207
}
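
/* Look up the PhysPageDesc for a physical page index. The table has
   two levels for 32 bit physical addresses and a third one when
   TARGET_PHYS_ADDR_SPACE_BITS > 32; missing levels are allocated only
   if 'alloc' is non zero, otherwise NULL is returned. */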
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif
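
/* Register a new CPU. The first call also initializes the page
   tables, the I/O memory handlers and the code generation buffer
   pointer. The CPU is appended to the global list and assigned the
   next free cpu_index. */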
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
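
/* TBs are linked through per-page lists and per-TB jump lists. In
   both cases the low two bits of each pointer are used as a tag: 0
   or 1 selects which page slot (or jump slot) of the pointed-to TB
   continues the list, and the value 2 marks the head of a circular
   jump list. The next function unlinks 'tb' from the TB list of one
   of its pages. */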
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the jump cache of each CPU */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
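
/* set bits [start, start + len) in the bitmap 'tab' */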
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
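
/* Build a bitmap with one bit per byte of the page, marking the
   bytes covered by translated code. tb_invalidate_phys_page_fast()
   uses it to skip writes that cannot touch any TB. */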
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC
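
/* Generate a new TB for the code at 'pc' and link it into the
   physical page tables, flushing the whole TB cache first if no
   room is left. */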
static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end). NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                   cpu_single_env->mem_write_vaddr, len,
                   cpu_single_env->eip,
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
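
/* Invalidate every TB that intersects the single page containing
   'addr'. Used by the non-softmmu code paths when a protected code
   page is written to; 'pc' and 'puc' describe the faulting context
   when called from a signal handler. */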
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);
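
/* Unchain jump slot 'n' of 'tb': remove tb from the circular list of
   jumps into its target, patch the jump back to tb's sequential exit,
   and recursively unchain the target TB as well. */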
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
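
/* Invalidate any TB covering the code at 'pc' so that the breakpoint
   is taken into account the next time the code is translated. */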
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong addr, pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
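
/* return true if the first 'n' characters of 's1' exactly match the
   NUL terminated string 's2' */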
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
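
/* invalidate a single TLB entry if any of its read, write or code
   addresses matches 'addr' */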
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);

    for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
        tb = env->tb_jmp_cache[i];
        if (tb &&
            ((tb->pc & TARGET_PAGE_MASK) == addr ||
             ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
            env->tb_jmp_cache[i] = NULL;
        }
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'ram_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
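
/* downgrade a RAM TLB write entry to NOTDIRTY if it falls in
   [start, start + length) so that the next write is trapped and the
   dirty bits can be set again */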
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}
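
/* reset the given dirty flags for a physical memory range and make
   the corresponding TLB write entries trap again */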
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[is_user][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, valid_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
1676

    
1677
int page_get_flags(target_ulong address)
1678
{
1679
    PageDesc *p;
1680

    
1681
    p = page_find(address >> TARGET_PAGE_BITS);
1682
    if (!p)
1683
        return 0;
1684
    return p->flags;
1685
}
1686

    
1687
/* modify the flags of a page and invalidate the code if
1688
   necessary. The flag PAGE_WRITE_ORG is positionned automatically
1689
   depending on PAGE_WRITE */
1690
void page_set_flags(target_ulong start, target_ulong end, int flags)
1691
{
1692
    PageDesc *p;
1693
    target_ulong addr;
1694

    
1695
    start = start & TARGET_PAGE_MASK;
1696
    end = TARGET_PAGE_ALIGN(end);
1697
    if (flags & PAGE_WRITE)
1698
        flags |= PAGE_WRITE_ORG;
1699
    spin_lock(&tb_lock);
1700
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1701
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1702
        /* if the write protection is set, then we invalidate the code
1703
           inside */
1704
        if (!(p->flags & PAGE_WRITE) && 
1705
            (flags & PAGE_WRITE) &&
1706
            p->first_tb) {
1707
            tb_invalidate_phys_page(addr, 0, NULL);
1708
        }
1709
        p->flags = flags;
1710
    }
1711
    spin_unlock(&tb_lock);
1712
}
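
/* Illustrative sketch, not part of the original file: an emulated
   mmap() in the user-mode syscall layer would register a fresh
   read/write mapping roughly like this (PAGE_VALID and friends are
   the guest page flags defined in cpu-all.h):

       page_set_flags(start, start + len,
                      PAGE_VALID | PAGE_READ | PAGE_WRITE);

   Because PAGE_WRITE is set, PAGE_WRITE_ORG is recorded as well, and
   any translated code on those pages is invalidated above before the
   new flags are stored. */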

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
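
/* Illustrative sketch, not part of the original file: the host
   SIGSEGV handler (see the per-host code in cpu-exec.c) is expected
   to try page_unprotect() before treating a write fault as a real
   guest fault. get_fault_pc() is a hypothetical stand-in for the
   host-specific ucontext decoding:

       static void host_segv_handler(int sig, siginfo_t *info, void *puc)
       {
           unsigned long pc = get_fault_pc(puc);
           target_ulong addr = (target_ulong)(long)info->si_addr;

           if (page_unprotect(addr, pc, puc))
               return;  // write to a page we protected: handled
           // otherwise deliver the fault to the guest
       }
*/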

/* call this function when system calls directly modify a memory area */
/* ??? This should be redundant now we have lock_user.  */
void page_unprotect_range(target_ulong data, target_ulong data_size)
{
    target_ulong start, end, addr;

    start = data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
            (phys_offset & IO_MEM_ROMD))
            phys_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
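
/* Illustrative sketch, not part of the original file: board init code
   calls this once per region. For example, to map ram_size bytes of
   RAM at physical address 0 and a ROM backed at a hypothetical
   rom_offset inside phys_ram_base:

       cpu_register_physical_memory(0x00000000, ram_size,
                                    0 | IO_MEM_RAM);
       cpu_register_physical_memory(0xfffe0000, 0x20000,
                                    rom_offset | IO_MEM_ROM);

   The low bits of phys_offset select the access handler (IO_MEM_RAM,
   IO_MEM_ROM, or a token from cpu_register_io_memory()); the page
   aligned high bits locate the backing pages in phys_ram_base. */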

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
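
/* The notdirty handlers below back the IO_MEM_NOTDIRTY slot: they are
   installed in the TLB for RAM pages whose CODE_DIRTY_FLAG is clear,
   i.e. pages that may still contain translated code. Each write first
   invalidates the TBs overlapping the stored bytes, then performs the
   store and updates the dirty bits; once the page is fully dirty the
   TLB entry is downgraded so that later writes go straight to RAM. */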
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions giving access at
   byte (index 0), word (index 1) and dword (index 2) granularity.
   All functions must be supplied. If io_index is positive, the
   corresponding io zone is modified; if it is zero or negative, a
   new io zone is allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
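
/* Illustrative sketch, not part of the original file: a device model
   registers its callbacks and then maps the returned token. The
   "mydev" accessors and the state pointer `s` are hypothetical:

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       int io = cpu_register_io_memory(0, mydev_read, mydev_write, s);
       cpu_register_physical_memory(base_addr, 0x1000, io);

   Passing 0 allocates a new io zone; `s` is handed back as the opaque
   argument of every callback. */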

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* use the per-page length 'l' here: 'len' may span pages
               whose flags have not been checked yet */
            p = lock_user(addr, l, 0);
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            p = lock_user(addr, l, 1);
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* used for ROM loading: can write to RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
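
/* Illustrative sketch, not part of the original file: a target MMU
   helper that sets the accessed/dirty bits of a page table entry in
   guest RAM uses the _notdirty variant so that the PTE update itself
   does not pollute the dirty bitmap it relies on (PTE_ACCESSED is a
   hypothetical target-specific bit):

       uint32_t pte = ldl_phys(pte_addr);
       if (!(pte & PTE_ACCESSED)) {
           pte |= PTE_ACCESSED;
           stl_phys_notdirty(pte_addr, pte);
       }
*/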

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
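
/* Illustrative sketch, not part of the original file: this is the
   primitive the gdb stub uses for memory access at guest virtual
   addresses, e.g. to read the bytes at the current program counter
   (env->eip assumes an x86 CPUState; other targets use their own PC
   field):

       uint8_t insn[16];
       if (cpu_memory_rw_debug(env, env->eip, insn, sizeof(insn), 0) < 0)
           printf("no physical page mapped\n");
*/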

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count            %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif