/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
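
/* The PageDesc/PhysPageDesc tables are two-level: the top L1_BITS of a
   page index select an L1 entry, the low L2_BITS select the descriptor
   inside the corresponding L2 block (an extra level is inserted for
   physical address spaces wider than 32 bits). */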

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
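    /* make the code generation buffer writable and executable on the host */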
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

    
320
#ifdef DEBUG_TB_CHECK
321

    
322
static void tb_invalidate_check(unsigned long address)
323
{
324
    TranslationBlock *tb;
325
    int i;
326
    address &= TARGET_PAGE_MASK;
327
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
328
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
329
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
330
                  address >= tb->pc + tb->size)) {
331
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
332
                       address, tb->pc, tb->size);
333
            }
334
        }
335
    }
336
}
337

    
338
/* verify that all the pages have correct rights for code */
339
static void tb_page_check(void)
340
{
341
    TranslationBlock *tb;
342
    int i, flags1, flags2;
343
    
344
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
345
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
346
            flags1 = page_get_flags(tb->pc);
347
            flags2 = page_get_flags(tb->pc + tb->size - 1);
348
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
349
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
350
                       tb->pc, tb->size, flags1, flags2);
351
            }
352
        }
353
    }
354
}
355

    
356
void tb_jmp_check(TranslationBlock *tb)
357
{
358
    TranslationBlock *tb1;
359
    unsigned int n1;
360

    
361
    /* suppress any remaining jumps to this TB */
362
    tb1 = tb->jmp_first;
363
    for(;;) {
364
        n1 = (long)tb1 & 3;
365
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
366
        if (n1 == 2)
367
            break;
368
        tb1 = tb1->jmp_next[n1];
369
    }
370
    /* check end of list */
371
    if (tb1 != tb) {
372
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
373
    }
374
}
375

    
376
#endif
377

    
378
/* invalidate one TB */
379
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
380
                             int next_offset)
381
{
382
    TranslationBlock *tb1;
383
    for(;;) {
384
        tb1 = *ptb;
385
        if (tb1 == tb) {
386
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
387
            break;
388
        }
389
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
390
    }
391
}
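
/* The page_next[] and jmp_next[] lists encode extra state in the two
   low bits of each pointer: 0 or 1 gives the index of the TB page (or
   jump slot) the link belongs to, and 2 marks the end of a jump list
   (jmp_first points back to the TB itself with this tag). */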

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
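
/* code_bitmap has one bit per byte of the page: a set bit means the
   byte is covered by at least one TB, so a write to it must invalidate
   the translated code. */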

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   range [start, end). NOTE: start and end must refer to the same
   physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                   cpu_single_env->mem_write_vaddr, len,
                   cpu_single_env->eip,
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

    
742
#if !defined(CONFIG_SOFTMMU)
743
static void tb_invalidate_phys_page(target_ulong addr, 
744
                                    unsigned long pc, void *puc)
745
{
746
    int n, current_flags, current_tb_modified;
747
    target_ulong current_pc, current_cs_base;
748
    PageDesc *p;
749
    TranslationBlock *tb, *current_tb;
750
#ifdef TARGET_HAS_PRECISE_SMC
751
    CPUState *env = cpu_single_env;
752
#endif
753

    
754
    addr &= TARGET_PAGE_MASK;
755
    p = page_find(addr >> TARGET_PAGE_BITS);
756
    if (!p) 
757
        return;
758
    tb = p->first_tb;
759
    current_tb_modified = 0;
760
    current_tb = NULL;
761
    current_pc = 0; /* avoid warning */
762
    current_cs_base = 0; /* avoid warning */
763
    current_flags = 0; /* avoid warning */
764
#ifdef TARGET_HAS_PRECISE_SMC
765
    if (tb && pc != 0) {
766
        current_tb = tb_find_pc(pc);
767
    }
768
#endif
769
    while (tb != NULL) {
770
        n = (long)tb & 3;
771
        tb = (TranslationBlock *)((long)tb & ~3);
772
#ifdef TARGET_HAS_PRECISE_SMC
773
        if (current_tb == tb &&
774
            !(current_tb->cflags & CF_SINGLE_INSN)) {
775
                /* If we are modifying the current TB, we must stop
776
                   its execution. We could be more precise by checking
777
                   that the modification is after the current PC, but it
778
                   would require a specialized function to partially
779
                   restore the CPU state */
780
            
781
            current_tb_modified = 1;
782
            cpu_restore_state(current_tb, env, pc, puc);
783
#if defined(TARGET_I386)
784
            current_flags = env->hflags;
785
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
786
            current_cs_base = (target_ulong)env->segs[R_CS].base;
787
            current_pc = current_cs_base + env->eip;
788
#else
789
#error unsupported CPU
790
#endif
791
        }
792
#endif /* TARGET_HAS_PRECISE_SMC */
793
        tb_phys_invalidate(tb, addr);
794
        tb = tb->page_next[n];
795
    }
796
    p->first_tb = NULL;
797
#ifdef TARGET_HAS_PRECISE_SMC
798
    if (current_tb_modified) {
799
        /* we generate a block containing just the instruction
800
           modifying the memory. It will ensure that it cannot modify
801
           itself */
802
        env->current_tb = NULL;
803
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 
804
                    CF_SINGLE_INSN);
805
        cpu_resume_from_signal(env, puc);
806
    }
807
#endif
808
}
809
#endif
810

    
811
/* add the tb in the target page and protect it if necessary */
812
static inline void tb_alloc_page(TranslationBlock *tb, 
813
                                 unsigned int n, unsigned int page_addr)
814
{
815
    PageDesc *p;
816
    TranslationBlock *last_first_tb;
817

    
818
    tb->page_addr[n] = page_addr;
819
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
820
    tb->page_next[n] = p->first_tb;
821
    last_first_tb = p->first_tb;
822
    p->first_tb = (TranslationBlock *)((long)tb | n);
823
    invalidate_page_bitmap(p);
824

    
825
#if defined(TARGET_HAS_SMC) || 1
826

    
827
#if defined(CONFIG_USER_ONLY)
828
    if (p->flags & PAGE_WRITE) {
829
        unsigned long host_start, host_end, addr;
830
        int prot;
831

    
832
        /* force the host page as non writable (writes will have a
833
           page fault + mprotect overhead) */
834
        host_start = page_addr & qemu_host_page_mask;
835
        host_end = host_start + qemu_host_page_size;
836
        prot = 0;
837
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
838
            prot |= page_get_flags(addr);
839
        mprotect((void *)host_start, qemu_host_page_size, 
840
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
841
#ifdef DEBUG_TB_INVALIDATE
842
        printf("protecting code page: 0x%08lx\n", 
843
               host_start);
844
#endif
845
        p->flags &= ~PAGE_WRITE;
846
    }
847
#else
848
    /* if some code is already present, then the pages are already
849
       protected. So we handle the case where only the first TB is
850
       allocated in a physical page */
851
    if (!last_first_tb) {
852
        tlb_protect_code(page_addr);
853
    }
854
#endif
855

    
856
#endif /* TARGET_HAS_SMC */
857
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

    
946
static void tb_reset_jump_recursive(TranslationBlock *tb);
947

    
948
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
949
{
950
    TranslationBlock *tb1, *tb_next, **ptb;
951
    unsigned int n1;
952

    
953
    tb1 = tb->jmp_next[n];
954
    if (tb1 != NULL) {
955
        /* find head of list */
956
        for(;;) {
957
            n1 = (long)tb1 & 3;
958
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
959
            if (n1 == 2)
960
                break;
961
            tb1 = tb1->jmp_next[n1];
962
        }
963
        /* we are now sure now that tb jumps to tb1 */
964
        tb_next = tb1;
965

    
966
        /* remove tb from the jmp_first list */
967
        ptb = &tb_next->jmp_first;
968
        for(;;) {
969
            tb1 = *ptb;
970
            n1 = (long)tb1 & 3;
971
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
972
            if (n1 == n && tb1 == tb)
973
                break;
974
            ptb = &tb1->jmp_next[n1];
975
        }
976
        *ptb = tb->jmp_next[n];
977
        tb->jmp_next[n] = NULL;
978
        
979
        /* suppress the jump to next tb in generated code */
980
        tb_reset_jump(tb, n);
981

    
982
        /* suppress jumps in the tb on which we could have jumped */
983
        tb_reset_jump_recursive(tb_next);
984
    }
985
}
986

    
987
static void tb_reset_jump_recursive(TranslationBlock *tb)
988
{
989
    tb_reset_jump_recursive2(tb, 0);
990
    tb_reset_jump_recursive2(tb, 1);
991
}
992

    
993
#if defined(TARGET_HAS_ICE)
994
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
995
{
996
    target_ulong phys_addr;
997

    
998
    phys_addr = cpu_get_phys_page_debug(env, pc);
999
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
1000
}
1001
#endif
1002

    
1003
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1004
   breakpoint is reached */
1005
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1006
{
1007
#if defined(TARGET_HAS_ICE)
1008
    int i;
1009
    
1010
    for(i = 0; i < env->nb_breakpoints; i++) {
1011
        if (env->breakpoints[i] == pc)
1012
            return 0;
1013
    }
1014

    
1015
    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1016
        return -1;
1017
    env->breakpoints[env->nb_breakpoints++] = pc;
1018
    
1019
    breakpoint_invalidate(env, pc);
1020
    return 0;
1021
#else
1022
    return -1;
1023
#endif
1024
}
1025

    
1026
/* remove a breakpoint */
1027
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1028
{
1029
#if defined(TARGET_HAS_ICE)
1030
    int i;
1031
    for(i = 0; i < env->nb_breakpoints; i++) {
1032
        if (env->breakpoints[i] == pc)
1033
            goto found;
1034
    }
1035
    return -1;
1036
 found:
1037
    env->nb_breakpoints--;
1038
    if (i < env->nb_breakpoints)
1039
      env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1040

    
1041
    breakpoint_invalidate(env, pc);
1042
    return 0;
1043
#else
1044
    return -1;
1045
#endif
1046
}
1047

    
1048
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1049
   CPU loop after each instruction */
1050
void cpu_single_step(CPUState *env, int enabled)
1051
{
1052
#if defined(TARGET_HAS_ICE)
1053
    if (env->singlestep_enabled != enabled) {
1054
        env->singlestep_enabled = enabled;
1055
        /* must flush all the translated code to avoid inconsistancies */
1056
        /* XXX: only flush what is necessary */
1057
        tb_flush(env);
1058
    }
1059
#endif
1060
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma-separated list of log masks. Returns 0 on error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
        tb = env->tb_jmp_cache[i];
        if (tb &&
            ((tb->pc & TARGET_PAGE_MASK) == addr ||
             ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
            env->tb_jmp_cache[i] = NULL;
        }
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
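
/* tlb_protect_code clears CODE_DIRTY_FLAG for the page, which makes
   tlb_set_page install its write TLB entries as IO_MEM_NOTDIRTY: every
   write then takes the slow path where self-modifying code can be
   detected. tlb_unprotect_code_phys sets the flag again once no
   translated code is left in the page. */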

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->address & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->address |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_write[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_write[1][i]);
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

    
1538
/* called from signal handler: invalidate the code and unprotect the
1539
   page. Return TRUE if the fault was succesfully handled. */
1540
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
1541
{
1542
#if !defined(CONFIG_SOFTMMU)
1543
    VirtPageDesc *vp;
1544

    
1545
#if defined(DEBUG_TLB)
1546
    printf("page_unprotect: addr=0x%08x\n", addr);
1547
#endif
1548
    addr &= TARGET_PAGE_MASK;
1549

    
1550
    /* if it is not mapped, no need to worry here */
1551
    if (addr >= MMAP_AREA_END)
1552
        return 0;
1553
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1554
    if (!vp)
1555
        return 0;
1556
    /* NOTE: in this case, validate_tag is _not_ tested as it
1557
       validates only the code TLB */
1558
    if (vp->valid_tag != virt_valid_tag)
1559
        return 0;
1560
    if (!(vp->prot & PAGE_WRITE))
1561
        return 0;
1562
#if defined(DEBUG_TLB)
1563
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n", 
1564
           addr, vp->phys_addr, vp->prot);
1565
#endif
1566
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1567
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1568
                  (unsigned long)addr, vp->prot);
1569
    /* set the dirty bit */
1570
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1571
    /* flush the code inside */
1572
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1573
    return 1;
1574
#else
1575
    return 0;
1576
#endif
1577
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
1638

    
1639
int page_get_flags(unsigned long address)
1640
{
1641
    PageDesc *p;
1642

    
1643
    p = page_find(address >> TARGET_PAGE_BITS);
1644
    if (!p)
1645
        return 0;
1646
    return p->flags;
1647
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the page was write protected and becomes writable, we
           invalidate the translated code it contains */
        if (!(p->flags & PAGE_WRITE) && 
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
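
/* Illustrative usage (a sketch, not code from this file): the user-mode
   mmap emulation would typically mark freshly mapped guest pages as
   valid and accessible with something like

       page_set_flags(start, start + len,
                      PAGE_VALID | PAGE_READ | PAGE_WRITE);

   where 'start' and 'len' are hypothetical, target-page-aligned guest
   addresses/sizes. */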

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size, 
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
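
/* Note: in the user-mode emulators this is typically reached from the
   host SIGSEGV handler. A store to a guest page that was write
   protected because it holds translated code faults; the handler calls
   page_unprotect() to invalidate the TBs on the page and restore
   PAGE_WRITE, then the faulting instruction is restarted. */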

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}
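
/* Illustrative usage (a sketch, not code from this file): before letting
   the host kernel write into guest memory on behalf of an emulated
   syscall, e.g.

       page_unprotect_range(host_buf, count);
       ret = read(fd, host_buf, count);

   'host_buf', 'count', 'fd' and 'ret' are hypothetical names. */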

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr, 
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
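
/* Illustrative usage (a sketch; the addresses and sizes are hypothetical):
   board/device code maps guest-physical ranges either to RAM/ROM offsets
   or to an io index, e.g.

       cpu_register_physical_memory(0x00000000, ram_size, IO_MEM_RAM);
       cpu_register_physical_memory(0xfc000000, 0x1000, mmio_io_index);

   where 'mmio_io_index' would be a value returned by
   cpu_register_io_memory() below. */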

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

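/* The "not dirty" write handlers below catch stores to RAM pages that
   still contain translated code: the TBs on the page are invalidated
   first, then the store is performed and the dirty flags are updated.
   Once the whole page is dirty again, the slow path is dropped from the
   TLB via tlb_set_dirty(). */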
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
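
/* Illustrative usage (a sketch only; 'mydev_*', 'mydev_state' and the
   guest-physical address are hypothetical):

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       io = cpu_register_io_memory(0, mydev_read, mydev_write, mydev_state);
       cpu_register_physical_memory(0xfe000000, 0x1000, io);

   Passing 0 for io_index allocates a new slot; the returned value is
   already shifted, so it can be stored directly as a phys_offset. */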

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, 
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, 
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |= 
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + 
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
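
/* Illustrative usage (a sketch; 'dma_addr' and 'sector_buf' are
   hypothetical): device models doing DMA copy data to or from guest RAM
   through this helper, e.g.

       cpu_physical_memory_write(dma_addr, sector_buf, 512);

   cpu_physical_memory_read()/write() are thin wrappers around
   cpu_physical_memory_rw() with is_write fixed to 0/1. */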

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + 
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* XXX: optimize */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    uint64_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 8);
    return tswap64(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + 
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr, 
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK), 
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
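
/* Note: this resolves each page through cpu_get_phys_page_debug(), so it
   operates on guest virtual addresses without going through the TLB; the
   gdb stub and monitor memory commands are the typical callers. */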

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count            %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n", 
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n", 
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", 
            cross_page, 
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count, 
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
}
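
/* Presumably wired to the monitor's "info jit" command: it reports
   translation cache statistics through the supplied cpu_fprintf callback
   so the same code can print to stdio or to the monitor. */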

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

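/* softmmu_template.h is included once per access size: SHIFT selects a
   1 << SHIFT byte access (0=byte, 1=word, 2=long, 3=quad). With
   SOFTMMU_CODE_ACCESS and the _cmmu suffix defined above, these
   expansions provide the slow-path helpers used for code fetches. */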
#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif