1
/*
2
 *  virtual page mapping and translated block handling
3
 * 
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include "config.h"
21
#ifdef _WIN32
22
#include <windows.h>
23
#else
24
#include <sys/types.h>
25
#include <sys/mman.h>
26
#endif
27
#include <stdlib.h>
28
#include <stdio.h>
29
#include <stdarg.h>
30
#include <string.h>
31
#include <errno.h>
32
#include <unistd.h>
33
#include <inttypes.h>
34

    
35
#include "cpu.h"
36
#include "exec-all.h"
37

    
38
//#define DEBUG_TB_INVALIDATE
39
//#define DEBUG_FLUSH
40
//#define DEBUG_TLB
41

    
42
/* make various TB consistency checks */
43
//#define DEBUG_TB_CHECK 
44
//#define DEBUG_TLB_CHECK 
45

    
46
/* threshold to flush the translated code buffer */
47
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
48

    
49
#define SMC_BITMAP_USE_THRESHOLD 10
50

    
51
#define MMAP_AREA_START        0x00000000
52
#define MMAP_AREA_END          0xa8000000
53

    
54
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
55
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
56
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
57
int nb_tbs;
58
/* any access to the tbs or the page table must use this lock */
59
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
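/* Two hash tables coexist: tb_hash is keyed on the virtual PC
   (tb_hash_func, used by tb_invalidate() below) while tb_phys_hash is
   keyed on the physical PC (tb_phys_hash_func, used by tb_link_phys()
   and tb_phys_invalidate()), so a TB can be looked up both by virtual
   address and by physical page. */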
60

    
61
uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
62
uint8_t *code_gen_ptr;
63

    
64
int phys_ram_size;
65
int phys_ram_fd;
66
uint8_t *phys_ram_base;
67
uint8_t *phys_ram_dirty;
68

    
69
typedef struct PageDesc {
70
    /* list of TBs intersecting this ram page */
71
    TranslationBlock *first_tb;
72
    /* to optimize self-modifying code handling, we count the writes
73
       to a given page; past SMC_BITMAP_USE_THRESHOLD a code bitmap is built */
74
    unsigned int code_write_count;
75
    uint8_t *code_bitmap;
76
#if defined(CONFIG_USER_ONLY)
77
    unsigned long flags;
78
#endif
79
} PageDesc;
80

    
81
typedef struct PhysPageDesc {
82
    /* offset in host memory of the page + io_index in the low 12 bits */
83
    uint32_t phys_offset;
84
} PhysPageDesc;
85

    
86
/* Note: the VirtPage handling is obsolete and will be removed
87
   ASAP */
88
typedef struct VirtPageDesc {
89
    /* physical address of code page. It is valid only if 'valid_tag'
90
       matches 'virt_valid_tag' */ 
91
    target_ulong phys_addr; 
92
    unsigned int valid_tag;
93
#if !defined(CONFIG_SOFTMMU)
94
    /* original page access rights. It is valid only if 'valid_tag'
95
       matches 'virt_valid_tag' */
96
    unsigned int prot;
97
#endif
98
} VirtPageDesc;
99

    
100
#define L2_BITS 10
101
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
102

    
103
#define L1_SIZE (1 << L1_BITS)
104
#define L2_SIZE (1 << L2_BITS)
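/* The PageDesc/PhysPageDesc tables are two-level: a page index of
   (32 - TARGET_PAGE_BITS) bits is split into an L1 index (the top
   L1_BITS bits, selecting one of L1_SIZE pointers) and an L2 index
   (the low L2_BITS = 10 bits, selecting one of L2_SIZE = 1024
   descriptors in the allocated leaf block). */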
105

    
106
static void io_mem_init(void);
107

    
108
unsigned long qemu_real_host_page_size;
109
unsigned long qemu_host_page_bits;
110
unsigned long qemu_host_page_size;
111
unsigned long qemu_host_page_mask;
112

    
113
/* XXX: for system emulation, it could just be an array */
114
static PageDesc *l1_map[L1_SIZE];
115
PhysPageDesc **l1_phys_map;
116

    
117
#if !defined(CONFIG_USER_ONLY)
118
#if TARGET_LONG_BITS > 32
119
#define VIRT_L_BITS 9
120
#define VIRT_L_SIZE (1 << VIRT_L_BITS)
121
static void *l1_virt_map[VIRT_L_SIZE];
122
#else
123
static VirtPageDesc *l1_virt_map[L1_SIZE];
124
#endif
125
static unsigned int virt_valid_tag;
126
#endif
127

    
128
/* io memory support */
129
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
130
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
131
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
132
static int io_mem_nb;
133

    
134
/* log support */
135
char *logfilename = "/tmp/qemu.log";
136
FILE *logfile;
137
int loglevel;
138

    
139
/* statistics */
140
static int tlb_flush_count;
141
static int tb_flush_count;
142
static int tb_phys_invalidate_count;
143

    
144
static void page_init(void)
145
{
146
    /* NOTE: we can always assume that qemu_host_page_size >=
147
       TARGET_PAGE_SIZE */
148
#ifdef _WIN32
149
    {
150
        SYSTEM_INFO system_info;
151
        DWORD old_protect;
152
        
153
        GetSystemInfo(&system_info);
154
        qemu_real_host_page_size = system_info.dwPageSize;
155
        
156
        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
157
                       PAGE_EXECUTE_READWRITE, &old_protect);
158
    }
159
#else
160
    qemu_real_host_page_size = getpagesize();
161
    {
162
        unsigned long start, end;
163

    
164
        start = (unsigned long)code_gen_buffer;
165
        start &= ~(qemu_real_host_page_size - 1);
166
        
167
        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
168
        end += qemu_real_host_page_size - 1;
169
        end &= ~(qemu_real_host_page_size - 1);
170
        
171
        mprotect((void *)start, end - start, 
172
                 PROT_READ | PROT_WRITE | PROT_EXEC);
173
    }
174
#endif
175

    
176
    if (qemu_host_page_size == 0)
177
        qemu_host_page_size = qemu_real_host_page_size;
178
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
179
        qemu_host_page_size = TARGET_PAGE_SIZE;
180
    qemu_host_page_bits = 0;
181
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
182
        qemu_host_page_bits++;
183
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
184
#if !defined(CONFIG_USER_ONLY)
185
    virt_valid_tag = 1;
186
#endif
187
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(PhysPageDesc *));
188
    memset(l1_phys_map, 0, L1_SIZE * sizeof(PhysPageDesc *));
189
}
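/* Example of the rounding above: with a 4 KB host page and an 8 KB
   target page, qemu_host_page_size becomes 8 KB (the larger of the
   two), qemu_host_page_bits 13 and qemu_host_page_mask ~0x1fff. */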
190

    
191
static inline PageDesc *page_find_alloc(unsigned int index)
192
{
193
    PageDesc **lp, *p;
194

    
195
    lp = &l1_map[index >> L2_BITS];
196
    p = *lp;
197
    if (!p) {
198
        /* allocate if not found */
199
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
200
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
201
        *lp = p;
202
    }
203
    return p + (index & (L2_SIZE - 1));
204
}
205

    
206
static inline PageDesc *page_find(unsigned int index)
207
{
208
    PageDesc *p;
209

    
210
    p = l1_map[index >> L2_BITS];
211
    if (!p)
212
        return 0;
213
    return p + (index & (L2_SIZE - 1));
214
}
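/* Lookup sketch: callers pass the target page index, e.g.
       PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
   the upper bits select the l1_map slot and the low L2_BITS bits the
   entry inside its 1024-descriptor leaf; a NULL l1_map slot means no
   descriptor (and hence no TB) exists for that page yet. */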
215

    
216
static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
217
{
218
    PhysPageDesc **lp, *p;
219

    
220
    lp = &l1_phys_map[index >> L2_BITS];
221
    p = *lp;
222
    if (!p) {
223
        /* allocate if not found */
224
        p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
225
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
226
        *lp = p;
227
    }
228
    return p + (index & (L2_SIZE - 1));
229
}
230

    
231
static inline PhysPageDesc *phys_page_find(unsigned int index)
232
{
233
    PhysPageDesc *p;
234

    
235
    p = l1_phys_map[index >> L2_BITS];
236
    if (!p)
237
        return 0;
238
    return p + (index & (L2_SIZE - 1));
239
}
240

    
241
#if !defined(CONFIG_USER_ONLY)
242
static void tlb_protect_code(CPUState *env, target_ulong addr);
243
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);
244

    
245
static VirtPageDesc *virt_page_find_alloc(target_ulong index, int alloc)
246
{
247
#if TARGET_LONG_BITS > 32
248
    void **p, **lp;
249

    
250
    p = l1_virt_map;
251
    lp = p + ((index >> (5 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
252
    p = *lp;
253
    if (!p) {
254
        if (!alloc)
255
            return NULL;
256
        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
257
        *lp = p;
258
    }
259
    lp = p + ((index >> (4 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
260
    p = *lp;
261
    if (!p) {
262
        if (!alloc)
263
            return NULL;
264
        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
265
        *lp = p;
266
    }
267
    lp = p + ((index >> (3 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
268
    p = *lp;
269
    if (!p) {
270
        if (!alloc)
271
            return NULL;
272
        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
273
        *lp = p;
274
    }
275
    lp = p + ((index >> (2 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
276
    p = *lp;
277
    if (!p) {
278
        if (!alloc)
279
            return NULL;
280
        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
281
        *lp = p;
282
    }
283
    lp = p + ((index >> (1 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
284
    p = *lp;
285
    if (!p) {
286
        if (!alloc)
287
            return NULL;
288
        p = qemu_mallocz(sizeof(VirtPageDesc) * VIRT_L_SIZE);
289
        *lp = p;
290
    }
291
    return ((VirtPageDesc *)p) + (index & (VIRT_L_SIZE - 1));
292
#else
293
    VirtPageDesc *p, **lp;
294

    
295
    lp = &l1_virt_map[index >> L2_BITS];
296
    p = *lp;
297
    if (!p) {
298
        /* allocate if not found */
299
        if (!alloc)
300
            return NULL;
301
        p = qemu_mallocz(sizeof(VirtPageDesc) * L2_SIZE);
302
        *lp = p;
303
    }
304
    return p + (index & (L2_SIZE - 1));
305
#endif
306
}
307

    
308
static inline VirtPageDesc *virt_page_find(target_ulong index)
309
{
310
    return virt_page_find_alloc(index, 0);
311
}
312

    
313
#if TARGET_LONG_BITS > 32
314
static void virt_page_flush_internal(void **p, int level)
315
{
316
    int i; 
317
    if (level == 0) {
318
        VirtPageDesc *q = (VirtPageDesc *)p;
319
        for(i = 0; i < VIRT_L_SIZE; i++)
320
            q[i].valid_tag = 0;
321
    } else {
322
        level--;
323
        for(i = 0; i < VIRT_L_SIZE; i++) {
324
            if (p[i])
325
                virt_page_flush_internal(p[i], level);
326
        }
327
    }
328
}
329
#endif
330

    
331
static void virt_page_flush(void)
332
{
333
    virt_valid_tag++;
334

    
335
    if (virt_valid_tag == 0) {
336
        virt_valid_tag = 1;
337
#if TARGET_LONG_BITS > 32
338
        virt_page_flush_internal(l1_virt_map, 5);
339
#else
340
        {
341
            int i, j;
342
            VirtPageDesc *p;
343
            for(i = 0; i < L1_SIZE; i++) {
344
                p = l1_virt_map[i];
345
                if (p) {
346
                    for(j = 0; j < L2_SIZE; j++)
347
                        p[j].valid_tag = 0;
348
                }
349
            }
350
        }
351
#endif
352
    }
353
}
354
#else
355
static void virt_page_flush(void)
356
{
357
}
358
#endif
359

    
360
void cpu_exec_init(void)
361
{
362
    if (!code_gen_ptr) {
363
        code_gen_ptr = code_gen_buffer;
364
        page_init();
365
        io_mem_init();
366
    }
367
}
368

    
369
static inline void invalidate_page_bitmap(PageDesc *p)
370
{
371
    if (p->code_bitmap) {
372
        qemu_free(p->code_bitmap);
373
        p->code_bitmap = NULL;
374
    }
375
    p->code_write_count = 0;
376
}
377

    
378
/* set to NULL all the 'first_tb' fields in all PageDescs */
379
static void page_flush_tb(void)
380
{
381
    int i, j;
382
    PageDesc *p;
383

    
384
    for(i = 0; i < L1_SIZE; i++) {
385
        p = l1_map[i];
386
        if (p) {
387
            for(j = 0; j < L2_SIZE; j++) {
388
                p->first_tb = NULL;
389
                invalidate_page_bitmap(p);
390
                p++;
391
            }
392
        }
393
    }
394
}
395

    
396
/* flush all the translation blocks */
397
/* XXX: tb_flush is currently not thread safe */
398
void tb_flush(CPUState *env)
399
{
400
#if defined(DEBUG_FLUSH)
401
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n", 
402
           code_gen_ptr - code_gen_buffer, 
403
           nb_tbs, 
404
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
405
#endif
406
    nb_tbs = 0;
407
    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
408
    virt_page_flush();
409

    
410
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
411
    page_flush_tb();
412

    
413
    code_gen_ptr = code_gen_buffer;
414
    /* XXX: flush processor icache at this point if cache flush is
415
       expensive */
416
    tb_flush_count++;
417
}
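/* There is no partial eviction of translated code: when tb_alloc()
   signals that the static code_gen_buffer is full (see
   CODE_GEN_BUFFER_MAX_SIZE above), callers such as tb_gen_code() call
   tb_flush() and every TB is discarded at once. */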
418

    
419
#ifdef DEBUG_TB_CHECK
420

    
421
static void tb_invalidate_check(unsigned long address)
422
{
423
    TranslationBlock *tb;
424
    int i;
425
    address &= TARGET_PAGE_MASK;
426
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
427
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
428
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
429
                  address >= tb->pc + tb->size)) {
430
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
431
                       address, tb->pc, tb->size);
432
            }
433
        }
434
    }
435
}
436

    
437
/* verify that all the pages have correct rights for code */
438
static void tb_page_check(void)
439
{
440
    TranslationBlock *tb;
441
    int i, flags1, flags2;
442
    
443
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
444
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
445
            flags1 = page_get_flags(tb->pc);
446
            flags2 = page_get_flags(tb->pc + tb->size - 1);
447
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
448
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
449
                       tb->pc, tb->size, flags1, flags2);
450
            }
451
        }
452
    }
453
}
454

    
455
void tb_jmp_check(TranslationBlock *tb)
456
{
457
    TranslationBlock *tb1;
458
    unsigned int n1;
459

    
460
    /* suppress any remaining jumps to this TB */
461
    tb1 = tb->jmp_first;
462
    for(;;) {
463
        n1 = (long)tb1 & 3;
464
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
465
        if (n1 == 2)
466
            break;
467
        tb1 = tb1->jmp_next[n1];
468
    }
469
    /* check end of list */
470
    if (tb1 != tb) {
471
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
472
    }
473
}
474

    
475
#endif
476

    
477
/* invalidate one TB */
478
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
479
                             int next_offset)
480
{
481
    TranslationBlock *tb1;
482
    for(;;) {
483
        tb1 = *ptb;
484
        if (tb1 == tb) {
485
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
486
            break;
487
        }
488
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
489
    }
490
}
491

    
492
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
493
{
494
    TranslationBlock *tb1;
495
    unsigned int n1;
496

    
497
    for(;;) {
498
        tb1 = *ptb;
499
        n1 = (long)tb1 & 3;
500
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
501
        if (tb1 == tb) {
502
            *ptb = tb1->page_next[n1];
503
            break;
504
        }
505
        ptb = &tb1->page_next[n1];
506
    }
507
}
508

    
509
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
510
{
511
    TranslationBlock *tb1, **ptb;
512
    unsigned int n1;
513

    
514
    ptb = &tb->jmp_next[n];
515
    tb1 = *ptb;
516
    if (tb1) {
517
        /* find tb(n) in circular list */
518
        for(;;) {
519
            tb1 = *ptb;
520
            n1 = (long)tb1 & 3;
521
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
522
            if (n1 == n && tb1 == tb)
523
                break;
524
            if (n1 == 2) {
525
                ptb = &tb1->jmp_first;
526
            } else {
527
                ptb = &tb1->jmp_next[n1];
528
            }
529
        }
530
        /* now we can suppress tb(n) from the list */
531
        *ptb = tb->jmp_next[n];
532

    
533
        tb->jmp_next[n] = NULL;
534
    }
535
}
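/* Pointer tagging used by these lists: the low 2 bits of jmp_first,
   jmp_next[] and page_next[]/first_tb encode which slot (0 or 1) of
   the pointed-to TB the link belongs to; the value 2 marks the head of
   the circular jump list, i.e. the owning TB itself (see
   tb->jmp_first = (TranslationBlock *)((long)tb | 2) below). */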
536

    
537
/* reset the jump entry 'n' of a TB so that it is not chained to
538
   another TB */
539
static inline void tb_reset_jump(TranslationBlock *tb, int n)
540
{
541
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
542
}
543

    
544
static inline void tb_invalidate(TranslationBlock *tb)
545
{
546
    unsigned int h, n1;
547
    TranslationBlock *tb1, *tb2, **ptb;
548
    
549
    tb_invalidated_flag = 1;
550

    
551
    /* remove the TB from the hash list */
552
    h = tb_hash_func(tb->pc);
553
    ptb = &tb_hash[h];
554
    for(;;) {
555
        tb1 = *ptb;
556
        /* NOTE: the TB is not necessarily linked in the hash table;
557
           if it is not, it is not currently in use */
558
        if (tb1 == NULL)
559
            return;
560
        if (tb1 == tb) {
561
            *ptb = tb1->hash_next;
562
            break;
563
        }
564
        ptb = &tb1->hash_next;
565
    }
566

    
567
    /* suppress this TB from the two jump lists */
568
    tb_jmp_remove(tb, 0);
569
    tb_jmp_remove(tb, 1);
570

    
571
    /* suppress any remaining jumps to this TB */
572
    tb1 = tb->jmp_first;
573
    for(;;) {
574
        n1 = (long)tb1 & 3;
575
        if (n1 == 2)
576
            break;
577
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
578
        tb2 = tb1->jmp_next[n1];
579
        tb_reset_jump(tb1, n1);
580
        tb1->jmp_next[n1] = NULL;
581
        tb1 = tb2;
582
    }
583
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
584
}
585

    
586
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
587
{
588
    PageDesc *p;
589
    unsigned int h;
590
    target_ulong phys_pc;
591
    
592
    /* remove the TB from the hash list */
593
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
594
    h = tb_phys_hash_func(phys_pc);
595
    tb_remove(&tb_phys_hash[h], tb, 
596
              offsetof(TranslationBlock, phys_hash_next));
597

    
598
    /* remove the TB from the page list */
599
    if (tb->page_addr[0] != page_addr) {
600
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
601
        tb_page_remove(&p->first_tb, tb);
602
        invalidate_page_bitmap(p);
603
    }
604
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
605
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
606
        tb_page_remove(&p->first_tb, tb);
607
        invalidate_page_bitmap(p);
608
    }
609

    
610
    tb_invalidate(tb);
611
    tb_phys_invalidate_count++;
612
}
613

    
614
static inline void set_bits(uint8_t *tab, int start, int len)
615
{
616
    int end, mask, end1;
617

    
618
    end = start + len;
619
    tab += start >> 3;
620
    mask = 0xff << (start & 7);
621
    if ((start & ~7) == (end & ~7)) {
622
        if (start < end) {
623
            mask &= ~(0xff << (end & 7));
624
            *tab |= mask;
625
        }
626
    } else {
627
        *tab++ |= mask;
628
        start = (start + 8) & ~7;
629
        end1 = end & ~7;
630
        while (start < end1) {
631
            *tab++ = 0xff;
632
            start += 8;
633
        }
634
        if (start < end) {
635
            mask = ~(0xff << (end & 7));
636
            *tab |= mask;
637
        }
638
    }
639
}
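/* Worked example: set_bits(tab, 5, 10) marks bits 5..14, i.e. it ORs
   0xe0 into tab[0] (bits 5-7) and 0x7f into tab[1] (bits 8-14). */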
640

    
641
static void build_page_bitmap(PageDesc *p)
642
{
643
    int n, tb_start, tb_end;
644
    TranslationBlock *tb;
645
    
646
    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
647
    if (!p->code_bitmap)
648
        return;
649
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
650

    
651
    tb = p->first_tb;
652
    while (tb != NULL) {
653
        n = (long)tb & 3;
654
        tb = (TranslationBlock *)((long)tb & ~3);
655
        /* NOTE: this is subtle as a TB may span two physical pages */
656
        if (n == 0) {
657
            /* NOTE: tb_end may be after the end of the page, but
658
               it is not a problem */
659
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
660
            tb_end = tb_start + tb->size;
661
            if (tb_end > TARGET_PAGE_SIZE)
662
                tb_end = TARGET_PAGE_SIZE;
663
        } else {
664
            tb_start = 0;
665
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
666
        }
667
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
668
        tb = tb->page_next[n];
669
    }
670
}
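/* The bitmap holds one bit per byte of the target page, set wherever
   translated code lives; tb_invalidate_phys_page_fast() below tests
   the bits covered by a write and skips the expensive range
   invalidation when the write touches no translated code. */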
671

    
672
#ifdef TARGET_HAS_PRECISE_SMC
673

    
674
static void tb_gen_code(CPUState *env, 
675
                        target_ulong pc, target_ulong cs_base, int flags,
676
                        int cflags)
677
{
678
    TranslationBlock *tb;
679
    uint8_t *tc_ptr;
680
    target_ulong phys_pc, phys_page2, virt_page2;
681
    int code_gen_size;
682

    
683
    phys_pc = get_phys_addr_code(env, pc);
684
    tb = tb_alloc(pc);
685
    if (!tb) {
686
        /* flush must be done */
687
        tb_flush(env);
688
        /* cannot fail at this point */
689
        tb = tb_alloc(pc);
690
    }
691
    tc_ptr = code_gen_ptr;
692
    tb->tc_ptr = tc_ptr;
693
    tb->cs_base = cs_base;
694
    tb->flags = flags;
695
    tb->cflags = cflags;
696
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
697
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
698
    
699
    /* check next page if needed */
700
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
701
    phys_page2 = -1;
702
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
703
        phys_page2 = get_phys_addr_code(env, virt_page2);
704
    }
705
    tb_link_phys(tb, phys_pc, phys_page2);
706
}
707
#endif
708
    
709
/* invalidate all TBs which intersect with the target physical page
710
   starting in range [start, end[. NOTE: start and end must refer to
711
   the same physical page. 'is_cpu_write_access' should be true if called
712
   from a real cpu write access: the virtual CPU will exit the current
713
   TB if code is modified inside this TB. */
714
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end, 
715
                                   int is_cpu_write_access)
716
{
717
    int n, current_tb_modified, current_tb_not_found, current_flags;
718
    CPUState *env = cpu_single_env;
719
    PageDesc *p;
720
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
721
    target_ulong tb_start, tb_end;
722
    target_ulong current_pc, current_cs_base;
723

    
724
    p = page_find(start >> TARGET_PAGE_BITS);
725
    if (!p) 
726
        return;
727
    if (!p->code_bitmap && 
728
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
729
        is_cpu_write_access) {
730
        /* build code bitmap */
731
        build_page_bitmap(p);
732
    }
733

    
734
    /* we remove all the TBs in the range [start, end[ */
735
    /* XXX: see if in some cases it could be faster to invalidate all the code */
736
    current_tb_not_found = is_cpu_write_access;
737
    current_tb_modified = 0;
738
    current_tb = NULL; /* avoid warning */
739
    current_pc = 0; /* avoid warning */
740
    current_cs_base = 0; /* avoid warning */
741
    current_flags = 0; /* avoid warning */
742
    tb = p->first_tb;
743
    while (tb != NULL) {
744
        n = (long)tb & 3;
745
        tb = (TranslationBlock *)((long)tb & ~3);
746
        tb_next = tb->page_next[n];
747
        /* NOTE: this is subtle as a TB may span two physical pages */
748
        if (n == 0) {
749
            /* NOTE: tb_end may be after the end of the page, but
750
               it is not a problem */
751
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
752
            tb_end = tb_start + tb->size;
753
        } else {
754
            tb_start = tb->page_addr[1];
755
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
756
        }
757
        if (!(tb_end <= start || tb_start >= end)) {
758
#ifdef TARGET_HAS_PRECISE_SMC
759
            if (current_tb_not_found) {
760
                current_tb_not_found = 0;
761
                current_tb = NULL;
762
                if (env->mem_write_pc) {
763
                    /* now we have a real cpu fault */
764
                    current_tb = tb_find_pc(env->mem_write_pc);
765
                }
766
            }
767
            if (current_tb == tb &&
768
                !(current_tb->cflags & CF_SINGLE_INSN)) {
769
                /* If we are modifying the current TB, we must stop
770
                its execution. We could be more precise by checking
771
                that the modification is after the current PC, but it
772
                would require a specialized function to partially
773
                restore the CPU state */
774
                
775
                current_tb_modified = 1;
776
                cpu_restore_state(current_tb, env, 
777
                                  env->mem_write_pc, NULL);
778
#if defined(TARGET_I386)
779
                current_flags = env->hflags;
780
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
781
                current_cs_base = (target_ulong)env->segs[R_CS].base;
782
                current_pc = current_cs_base + env->eip;
783
#else
784
#error unsupported CPU
785
#endif
786
            }
787
#endif /* TARGET_HAS_PRECISE_SMC */
788
            saved_tb = env->current_tb;
789
            env->current_tb = NULL;
790
            tb_phys_invalidate(tb, -1);
791
            env->current_tb = saved_tb;
792
            if (env->interrupt_request && env->current_tb)
793
                cpu_interrupt(env, env->interrupt_request);
794
        }
795
        tb = tb_next;
796
    }
797
#if !defined(CONFIG_USER_ONLY)
798
    /* if no code remaining, no need to continue to use slow writes */
799
    if (!p->first_tb) {
800
        invalidate_page_bitmap(p);
801
        if (is_cpu_write_access) {
802
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
803
        }
804
    }
805
#endif
806
#ifdef TARGET_HAS_PRECISE_SMC
807
    if (current_tb_modified) {
808
        /* we generate a block containing just the instruction
809
           modifying the memory. This ensures that it cannot modify
810
           itself */
811
        env->current_tb = NULL;
812
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 
813
                    CF_SINGLE_INSN);
814
        cpu_resume_from_signal(env, NULL);
815
    }
816
#endif
817
}
818

    
819
/* len must be <= 8 and start must be a multiple of len */
820
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
821
{
822
    PageDesc *p;
823
    int offset, b;
824
#if 0
825
    if (1) {
826
        if (loglevel) {
827
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n", 
828
                   cpu_single_env->mem_write_vaddr, len, 
829
                   cpu_single_env->eip, 
830
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
831
        }
832
    }
833
#endif
834
    p = page_find(start >> TARGET_PAGE_BITS);
835
    if (!p) 
836
        return;
837
    if (p->code_bitmap) {
838
        offset = start & ~TARGET_PAGE_MASK;
839
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
840
        if (b & ((1 << len) - 1))
841
            goto do_invalidate;
842
    } else {
843
    do_invalidate:
844
        tb_invalidate_phys_page_range(start, start + len, 1);
845
    }
846
}
847

    
848
#if !defined(CONFIG_SOFTMMU)
849
static void tb_invalidate_phys_page(target_ulong addr, 
850
                                    unsigned long pc, void *puc)
851
{
852
    int n, current_flags, current_tb_modified;
853
    target_ulong current_pc, current_cs_base;
854
    PageDesc *p;
855
    TranslationBlock *tb, *current_tb;
856
#ifdef TARGET_HAS_PRECISE_SMC
857
    CPUState *env = cpu_single_env;
858
#endif
859

    
860
    addr &= TARGET_PAGE_MASK;
861
    p = page_find(addr >> TARGET_PAGE_BITS);
862
    if (!p) 
863
        return;
864
    tb = p->first_tb;
865
    current_tb_modified = 0;
866
    current_tb = NULL;
867
    current_pc = 0; /* avoid warning */
868
    current_cs_base = 0; /* avoid warning */
869
    current_flags = 0; /* avoid warning */
870
#ifdef TARGET_HAS_PRECISE_SMC
871
    if (tb && pc != 0) {
872
        current_tb = tb_find_pc(pc);
873
    }
874
#endif
875
    while (tb != NULL) {
876
        n = (long)tb & 3;
877
        tb = (TranslationBlock *)((long)tb & ~3);
878
#ifdef TARGET_HAS_PRECISE_SMC
879
        if (current_tb == tb &&
880
            !(current_tb->cflags & CF_SINGLE_INSN)) {
881
                /* If we are modifying the current TB, we must stop
882
                   its execution. We could be more precise by checking
883
                   that the modification is after the current PC, but it
884
                   would require a specialized function to partially
885
                   restore the CPU state */
886
            
887
            current_tb_modified = 1;
888
            cpu_restore_state(current_tb, env, pc, puc);
889
#if defined(TARGET_I386)
890
            current_flags = env->hflags;
891
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
892
            current_cs_base = (target_ulong)env->segs[R_CS].base;
893
            current_pc = current_cs_base + env->eip;
894
#else
895
#error unsupported CPU
896
#endif
897
        }
898
#endif /* TARGET_HAS_PRECISE_SMC */
899
        tb_phys_invalidate(tb, addr);
900
        tb = tb->page_next[n];
901
    }
902
    p->first_tb = NULL;
903
#ifdef TARGET_HAS_PRECISE_SMC
904
    if (current_tb_modified) {
905
        /* we generate a block containing just the instruction
906
           modifying the memory. It will ensure that it cannot modify
907
           itself */
908
        env->current_tb = NULL;
909
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 
910
                    CF_SINGLE_INSN);
911
        cpu_resume_from_signal(env, puc);
912
    }
913
#endif
914
}
915
#endif
916

    
917
/* add the tb in the target page and protect it if necessary */
918
static inline void tb_alloc_page(TranslationBlock *tb, 
919
                                 unsigned int n, unsigned int page_addr)
920
{
921
    PageDesc *p;
922
    TranslationBlock *last_first_tb;
923

    
924
    tb->page_addr[n] = page_addr;
925
    p = page_find(page_addr >> TARGET_PAGE_BITS);
926
    tb->page_next[n] = p->first_tb;
927
    last_first_tb = p->first_tb;
928
    p->first_tb = (TranslationBlock *)((long)tb | n);
929
    invalidate_page_bitmap(p);
930

    
931
#if defined(TARGET_HAS_SMC) || 1
932

    
933
#if defined(CONFIG_USER_ONLY)
934
    if (p->flags & PAGE_WRITE) {
935
        unsigned long host_start, host_end, addr;
936
        int prot;
937

    
938
        /* force the host page to be non-writable (writes will incur a
939
           page fault + mprotect overhead) */
940
        host_start = page_addr & qemu_host_page_mask;
941
        host_end = host_start + qemu_host_page_size;
942
        prot = 0;
943
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
944
            prot |= page_get_flags(addr);
945
        mprotect((void *)host_start, qemu_host_page_size, 
946
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
947
#ifdef DEBUG_TB_INVALIDATE
948
        printf("protecting code page: 0x%08lx\n", 
949
               host_start);
950
#endif
951
        p->flags &= ~PAGE_WRITE;
952
    }
953
#else
954
    /* if some code is already present, then the pages are already
955
       protected. So we handle the case where only the first TB is
956
       allocated in a physical page */
957
    if (!last_first_tb) {
958
        target_ulong virt_addr;
959

    
960
        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
961
        tlb_protect_code(cpu_single_env, virt_addr);        
962
    }
963
#endif
964

    
965
#endif /* TARGET_HAS_SMC */
966
}
967

    
968
/* Allocate a new translation block. Flush the translation buffer if
969
   too many translation blocks or too much generated code. */
970
TranslationBlock *tb_alloc(target_ulong pc)
971
{
972
    TranslationBlock *tb;
973

    
974
    if (nb_tbs >= CODE_GEN_MAX_BLOCKS || 
975
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
976
        return NULL;
977
    tb = &tbs[nb_tbs++];
978
    tb->pc = pc;
979
    tb->cflags = 0;
980
    return tb;
981
}
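/* Typical call pattern (sketch, as in tb_gen_code() above): on a NULL
   return the caller flushes the whole buffer and retries, which then
   cannot fail:
       tb = tb_alloc(pc);
       if (!tb) {
           tb_flush(env);
           tb = tb_alloc(pc);
       }
*/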
982

    
983
/* add a new TB and link it to the physical page tables. phys_page2 is
984
   (-1) to indicate that only one page contains the TB. */
985
void tb_link_phys(TranslationBlock *tb, 
986
                  target_ulong phys_pc, target_ulong phys_page2)
987
{
988
    unsigned int h;
989
    TranslationBlock **ptb;
990

    
991
    /* add in the physical hash table */
992
    h = tb_phys_hash_func(phys_pc);
993
    ptb = &tb_phys_hash[h];
994
    tb->phys_hash_next = *ptb;
995
    *ptb = tb;
996

    
997
    /* add in the page list */
998
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
999
    if (phys_page2 != -1)
1000
        tb_alloc_page(tb, 1, phys_page2);
1001
    else
1002
        tb->page_addr[1] = -1;
1003
#ifdef DEBUG_TB_CHECK
1004
    tb_page_check();
1005
#endif
1006
}
1007

    
1008
/* link the tb with the other TBs */
1009
void tb_link(TranslationBlock *tb)
1010
{
1011
#if !defined(CONFIG_USER_ONLY)
1012
    {
1013
        VirtPageDesc *vp;
1014
        target_ulong addr;
1015
        
1016
        /* save the code memory mappings (needed to invalidate the code) */
1017
        addr = tb->pc & TARGET_PAGE_MASK;
1018
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1019
#ifdef DEBUG_TLB_CHECK 
1020
        if (vp->valid_tag == virt_valid_tag &&
1021
            vp->phys_addr != tb->page_addr[0]) {
1022
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
1023
                   addr, tb->page_addr[0], vp->phys_addr);
1024
        }
1025
#endif
1026
        vp->phys_addr = tb->page_addr[0];
1027
        if (vp->valid_tag != virt_valid_tag) {
1028
            vp->valid_tag = virt_valid_tag;
1029
#if !defined(CONFIG_SOFTMMU)
1030
            vp->prot = 0;
1031
#endif
1032
        }
1033
        
1034
        if (tb->page_addr[1] != -1) {
1035
            addr += TARGET_PAGE_SIZE;
1036
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1037
#ifdef DEBUG_TLB_CHECK 
1038
            if (vp->valid_tag == virt_valid_tag &&
1039
                vp->phys_addr != tb->page_addr[1]) { 
1040
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
1041
                       addr, tb->page_addr[1], vp->phys_addr);
1042
            }
1043
#endif
1044
            vp->phys_addr = tb->page_addr[1];
1045
            if (vp->valid_tag != virt_valid_tag) {
1046
                vp->valid_tag = virt_valid_tag;
1047
#if !defined(CONFIG_SOFTMMU)
1048
                vp->prot = 0;
1049
#endif
1050
            }
1051
        }
1052
    }
1053
#endif
1054

    
1055
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1056
    tb->jmp_next[0] = NULL;
1057
    tb->jmp_next[1] = NULL;
1058
#ifdef USE_CODE_COPY
1059
    tb->cflags &= ~CF_FP_USED;
1060
    if (tb->cflags & CF_TB_FP_USED)
1061
        tb->cflags |= CF_FP_USED;
1062
#endif
1063

    
1064
    /* init original jump addresses */
1065
    if (tb->tb_next_offset[0] != 0xffff)
1066
        tb_reset_jump(tb, 0);
1067
    if (tb->tb_next_offset[1] != 0xffff)
1068
        tb_reset_jump(tb, 1);
1069
}
1070

    
1071
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1072
   tb[1].tc_ptr. Return NULL if not found */
1073
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1074
{
1075
    int m_min, m_max, m;
1076
    unsigned long v;
1077
    TranslationBlock *tb;
1078

    
1079
    if (nb_tbs <= 0)
1080
        return NULL;
1081
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1082
        tc_ptr >= (unsigned long)code_gen_ptr)
1083
        return NULL;
1084
    /* binary search (cf Knuth) */
1085
    m_min = 0;
1086
    m_max = nb_tbs - 1;
1087
    while (m_min <= m_max) {
1088
        m = (m_min + m_max) >> 1;
1089
        tb = &tbs[m];
1090
        v = (unsigned long)tb->tc_ptr;
1091
        if (v == tc_ptr)
1092
            return tb;
1093
        else if (tc_ptr < v) {
1094
            m_max = m - 1;
1095
        } else {
1096
            m_min = m + 1;
1097
        }
1098
    } 
1099
    return &tbs[m_max];
1100
}
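/* The binary search relies on tbs[] being ordered by tc_ptr, which
   holds because code_gen_ptr only grows between flushes, so TBs are
   laid out in allocation order; when tc_ptr falls inside a block
   rather than on its first byte, tbs[m_max] (the block containing it)
   is returned. */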
1101

    
1102
static void tb_reset_jump_recursive(TranslationBlock *tb);
1103

    
1104
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1105
{
1106
    TranslationBlock *tb1, *tb_next, **ptb;
1107
    unsigned int n1;
1108

    
1109
    tb1 = tb->jmp_next[n];
1110
    if (tb1 != NULL) {
1111
        /* find head of list */
1112
        for(;;) {
1113
            n1 = (long)tb1 & 3;
1114
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1115
            if (n1 == 2)
1116
                break;
1117
            tb1 = tb1->jmp_next[n1];
1118
        }
1119
        /* we are now sure that tb jumps to tb1 */
1120
        tb_next = tb1;
1121

    
1122
        /* remove tb from the jmp_first list */
1123
        ptb = &tb_next->jmp_first;
1124
        for(;;) {
1125
            tb1 = *ptb;
1126
            n1 = (long)tb1 & 3;
1127
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1128
            if (n1 == n && tb1 == tb)
1129
                break;
1130
            ptb = &tb1->jmp_next[n1];
1131
        }
1132
        *ptb = tb->jmp_next[n];
1133
        tb->jmp_next[n] = NULL;
1134
        
1135
        /* suppress the jump to next tb in generated code */
1136
        tb_reset_jump(tb, n);
1137

    
1138
        /* suppress jumps in the tb on which we could have jumped */
1139
        tb_reset_jump_recursive(tb_next);
1140
    }
1141
}
1142

    
1143
static void tb_reset_jump_recursive(TranslationBlock *tb)
1144
{
1145
    tb_reset_jump_recursive2(tb, 0);
1146
    tb_reset_jump_recursive2(tb, 1);
1147
}
1148

    
1149
#if defined(TARGET_HAS_ICE)
1150
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1151
{
1152
    target_ulong phys_addr;
1153

    
1154
    phys_addr = cpu_get_phys_page_debug(env, pc);
1155
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
1156
}
1157
#endif
1158

    
1159
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1160
   breakpoint is reached */
1161
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1162
{
1163
#if defined(TARGET_HAS_ICE)
1164
    int i;
1165
    
1166
    for(i = 0; i < env->nb_breakpoints; i++) {
1167
        if (env->breakpoints[i] == pc)
1168
            return 0;
1169
    }
1170

    
1171
    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1172
        return -1;
1173
    env->breakpoints[env->nb_breakpoints++] = pc;
1174
    
1175
    breakpoint_invalidate(env, pc);
1176
    return 0;
1177
#else
1178
    return -1;
1179
#endif
1180
}
1181

    
1182
/* remove a breakpoint */
1183
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1184
{
1185
#if defined(TARGET_HAS_ICE)
1186
    int i;
1187
    for(i = 0; i < env->nb_breakpoints; i++) {
1188
        if (env->breakpoints[i] == pc)
1189
            goto found;
1190
    }
1191
    return -1;
1192
 found:
1193
    env->nb_breakpoints--;
1194
    if (i < env->nb_breakpoints)
1195
      env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1196

    
1197
    breakpoint_invalidate(env, pc);
1198
    return 0;
1199
#else
1200
    return -1;
1201
#endif
1202
}
1203

    
1204
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1205
   CPU loop after each instruction */
1206
void cpu_single_step(CPUState *env, int enabled)
1207
{
1208
#if defined(TARGET_HAS_ICE)
1209
    if (env->singlestep_enabled != enabled) {
1210
        env->singlestep_enabled = enabled;
1211
        /* must flush all the translated code to avoid inconsistancies */
1212
        /* XXX: only flush what is necessary */
1213
        tb_flush(env);
1214
    }
1215
#endif
1216
}
1217

    
1218
/* enable or disable low levels log */
1219
void cpu_set_log(int log_flags)
1220
{
1221
    loglevel = log_flags;
1222
    if (loglevel && !logfile) {
1223
        logfile = fopen(logfilename, "w");
1224
        if (!logfile) {
1225
            perror(logfilename);
1226
            _exit(1);
1227
        }
1228
#if !defined(CONFIG_SOFTMMU)
1229
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1230
        {
1231
            static uint8_t logfile_buf[4096];
1232
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1233
        }
1234
#else
1235
        setvbuf(logfile, NULL, _IOLBF, 0);
1236
#endif
1237
    }
1238
}
1239

    
1240
void cpu_set_log_filename(const char *filename)
1241
{
1242
    logfilename = strdup(filename);
1243
}
1244

    
1245
/* mask must never be zero, except for A20 change call */
1246
void cpu_interrupt(CPUState *env, int mask)
1247
{
1248
    TranslationBlock *tb;
1249
    static int interrupt_lock;
1250

    
1251
    env->interrupt_request |= mask;
1252
    /* if the cpu is currently executing code, we must unlink it and
1253
       all the potentially executing TB */
1254
    tb = env->current_tb;
1255
    if (tb && !testandset(&interrupt_lock)) {
1256
        env->current_tb = NULL;
1257
        tb_reset_jump_recursive(tb);
1258
        interrupt_lock = 0;
1259
    }
1260
}
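/* Unlinking the chained jumps of the currently executing TB forces the
   generated code back to the main execution loop at the next block
   boundary, where interrupt_request can be examined; interrupt_lock
   only guards against concurrent unlinking. */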
1261

    
1262
void cpu_reset_interrupt(CPUState *env, int mask)
1263
{
1264
    env->interrupt_request &= ~mask;
1265
}
1266

    
1267
CPULogItem cpu_log_items[] = {
1268
    { CPU_LOG_TB_OUT_ASM, "out_asm", 
1269
      "show generated host assembly code for each compiled TB" },
1270
    { CPU_LOG_TB_IN_ASM, "in_asm",
1271
      "show target assembly code for each compiled TB" },
1272
    { CPU_LOG_TB_OP, "op", 
1273
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1274
#ifdef TARGET_I386
1275
    { CPU_LOG_TB_OP_OPT, "op_opt",
1276
      "show micro ops after optimization for each compiled TB" },
1277
#endif
1278
    { CPU_LOG_INT, "int",
1279
      "show interrupts/exceptions in short format" },
1280
    { CPU_LOG_EXEC, "exec",
1281
      "show trace before each executed TB (lots of logs)" },
1282
    { CPU_LOG_TB_CPU, "cpu",
1283
      "show CPU state before bloc translation" },
1284
#ifdef TARGET_I386
1285
    { CPU_LOG_PCALL, "pcall",
1286
      "show protected mode far calls/returns/exceptions" },
1287
#endif
1288
#ifdef DEBUG_IOPORT
1289
    { CPU_LOG_IOPORT, "ioport",
1290
      "show all i/o ports accesses" },
1291
#endif
1292
    { 0, NULL, NULL },
1293
};
1294

    
1295
static int cmp1(const char *s1, int n, const char *s2)
1296
{
1297
    if (strlen(s2) != n)
1298
        return 0;
1299
    return memcmp(s1, s2, n) == 0;
1300
}
1301
      
1302
/* takes a comma-separated list of log masks. Returns 0 on error. */
1303
int cpu_str_to_log_mask(const char *str)
1304
{
1305
    CPULogItem *item;
1306
    int mask;
1307
    const char *p, *p1;
1308

    
1309
    p = str;
1310
    mask = 0;
1311
    for(;;) {
1312
        p1 = strchr(p, ',');
1313
        if (!p1)
1314
            p1 = p + strlen(p);
1315
        if (cmp1(p, p1 - p, "all")) {
1316
            for(item = cpu_log_items; item->mask != 0; item++) {
1317
                mask |= item->mask;
1318
            }
1319
        } else {
1320
            for(item = cpu_log_items; item->mask != 0; item++) {
1321
                if (cmp1(p, p1 - p, item->name))
1322
                    goto found;
1323
            }
1324
            return 0;
1325
        }
1326
    found:
1327
        mask |= item->mask;
1328
        if (*p1 != ',')
1329
            break;
1330
        p = p1 + 1;
1331
    }
1332
    return mask;
1333
}
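/* Usage sketch (log names taken from cpu_log_items above):
       int mask = cpu_str_to_log_mask("in_asm,op");
       if (mask)
           cpu_set_log(mask);
*/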
1334

    
1335
void cpu_abort(CPUState *env, const char *fmt, ...)
1336
{
1337
    va_list ap;
1338

    
1339
    va_start(ap, fmt);
1340
    fprintf(stderr, "qemu: fatal: ");
1341
    vfprintf(stderr, fmt, ap);
1342
    fprintf(stderr, "\n");
1343
#ifdef TARGET_I386
1344
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1345
#else
1346
    cpu_dump_state(env, stderr, fprintf, 0);
1347
#endif
1348
    va_end(ap);
1349
    abort();
1350
}
1351

    
1352
#if !defined(CONFIG_USER_ONLY)
1353

    
1354
/* NOTE: if flush_global is true, also flush global entries (not
1355
   implemented yet) */
1356
void tlb_flush(CPUState *env, int flush_global)
1357
{
1358
    int i;
1359

    
1360
#if defined(DEBUG_TLB)
1361
    printf("tlb_flush:\n");
1362
#endif
1363
    /* must reset current TB so that interrupts cannot modify the
1364
       links while we are modifying them */
1365
    env->current_tb = NULL;
1366

    
1367
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1368
        env->tlb_read[0][i].address = -1;
1369
        env->tlb_write[0][i].address = -1;
1370
        env->tlb_read[1][i].address = -1;
1371
        env->tlb_write[1][i].address = -1;
1372
    }
1373

    
1374
    virt_page_flush();
1375
    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
1376

    
1377
#if !defined(CONFIG_SOFTMMU)
1378
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1379
#endif
1380
#ifdef USE_KQEMU
1381
    if (env->kqemu_enabled) {
1382
        kqemu_flush(env, flush_global);
1383
    }
1384
#endif
1385
    tlb_flush_count++;
1386
}
1387

    
1388
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1389
{
1390
    if (addr == (tlb_entry->address & 
1391
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
1392
        tlb_entry->address = -1;
1393
}
1394

    
1395
void tlb_flush_page(CPUState *env, target_ulong addr)
1396
{
1397
    int i, n;
1398
    VirtPageDesc *vp;
1399
    PageDesc *p;
1400
    TranslationBlock *tb;
1401

    
1402
#if defined(DEBUG_TLB)
1403
    printf("tlb_flush_page: 0x%08x\n", addr);
1404
#endif
1405
    /* must reset current TB so that interrupts cannot modify the
1406
       links while we are modifying them */
1407
    env->current_tb = NULL;
1408

    
1409
    addr &= TARGET_PAGE_MASK;
1410
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1411
    tlb_flush_entry(&env->tlb_read[0][i], addr);
1412
    tlb_flush_entry(&env->tlb_write[0][i], addr);
1413
    tlb_flush_entry(&env->tlb_read[1][i], addr);
1414
    tlb_flush_entry(&env->tlb_write[1][i], addr);
1415

    
1416
    /* remove from the virtual pc hash table all the TBs at this
1417
       virtual address */
1418
    
1419
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1420
    if (vp && vp->valid_tag == virt_valid_tag) {
1421
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
1422
        if (p) {
1423
            /* we remove all the links to the TBs in this virtual page */
1424
            tb = p->first_tb;
1425
            while (tb != NULL) {
1426
                n = (long)tb & 3;
1427
                tb = (TranslationBlock *)((long)tb & ~3);
1428
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
1429
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
1430
                    tb_invalidate(tb);
1431
                }
1432
                tb = tb->page_next[n];
1433
            }
1434
        }
1435
        vp->valid_tag = 0;
1436
    }
1437

    
1438
#if !defined(CONFIG_SOFTMMU)
1439
    if (addr < MMAP_AREA_END)
1440
        munmap((void *)addr, TARGET_PAGE_SIZE);
1441
#endif
1442
#ifdef USE_KQEMU
1443
    if (env->kqemu_enabled) {
1444
        kqemu_flush_page(env, addr);
1445
    }
1446
#endif
1447
}
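/* The software TLB is direct-mapped: a virtual address selects slot
   (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1), and separate
   read/write tables are kept for both is_user values (index 0 and 1),
   which is why four entries are flushed above. */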
1448

    
1449
static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
1450
{
1451
    if (addr == (tlb_entry->address & 
1452
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
1453
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
1454
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
1455
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
1456
    }
1457
}
1458

    
1459
/* update the TLBs so that writes to code in the virtual page 'addr'
1460
   can be detected */
1461
static void tlb_protect_code(CPUState *env, target_ulong addr)
1462
{
1463
    int i;
1464

    
1465
    addr &= TARGET_PAGE_MASK;
1466
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1467
    tlb_protect_code1(&env->tlb_write[0][i], addr);
1468
    tlb_protect_code1(&env->tlb_write[1][i], addr);
1469
#if !defined(CONFIG_SOFTMMU)
1470
    /* NOTE: as we generated the code for this page, it is already at
1471
       least readable */
1472
    if (addr < MMAP_AREA_END)
1473
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
1474
#endif
1475
}
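/* Redirecting the write entries to IO_MEM_CODE diverts stores to such
   pages through the slow I/O path, where the write can be checked
   against the page's code bitmap (see tb_invalidate_phys_page_fast()
   above) and the overlapping TBs invalidated before the store is
   performed. */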
1476

    
1477
static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry, 
1478
                                       unsigned long phys_addr)
1479
{
1480
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
1481
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
1482
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1483
    }
1484
}
1485

    
1486
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1487
   tested for self-modifying code */
1488
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
1489
{
1490
    int i;
1491

    
1492
    phys_addr &= TARGET_PAGE_MASK;
1493
    phys_addr += (long)phys_ram_base;
1494
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1495
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
1496
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
1497
}
1498

    
1499
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, 
1500
                                         unsigned long start, unsigned long length)
1501
{
1502
    unsigned long addr;
1503
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1504
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1505
        if ((addr - start) < length) {
1506
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1507
        }
1508
    }
1509
}
1510

    
1511
void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end,
1512
                                     int dirty_flags)
1513
{
1514
    CPUState *env;
1515
    unsigned long length, start1;
1516
    int i, mask, len;
1517
    uint8_t *p;
1518

    
1519
    start &= TARGET_PAGE_MASK;
1520
    end = TARGET_PAGE_ALIGN(end);
1521

    
1522
    length = end - start;
1523
    if (length == 0)
1524
        return;
1525
    mask = ~dirty_flags;
1526
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1527
    len = length >> TARGET_PAGE_BITS;
1528
    for(i = 0; i < len; i++)
1529
        p[i] &= mask;
1530

    
1531
    env = cpu_single_env;
1532
    /* we modify the TLB cache so that the dirty bit will be set again
1533
       when accessing the range */
1534
    start1 = start + (unsigned long)phys_ram_base;
1535
    for(i = 0; i < CPU_TLB_SIZE; i++)
1536
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
1537
    for(i = 0; i < CPU_TLB_SIZE; i++)
1538
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
1539

    
1540
#if !defined(CONFIG_SOFTMMU)
1541
    /* XXX: this is expensive */
1542
    {
1543
        VirtPageDesc *p;
1544
        int j;
1545
        target_ulong addr;
1546

    
1547
        for(i = 0; i < L1_SIZE; i++) {
1548
            p = l1_virt_map[i];
1549
            if (p) {
1550
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
1551
                for(j = 0; j < L2_SIZE; j++) {
1552
                    if (p->valid_tag == virt_valid_tag &&
1553
                        p->phys_addr >= start && p->phys_addr < end &&
1554
                        (p->prot & PROT_WRITE)) {
1555
                        if (addr < MMAP_AREA_END) {
1556
                            mprotect((void *)addr, TARGET_PAGE_SIZE, 
1557
                                     p->prot & ~PROT_WRITE);
1558
                        }
1559
                    }
1560
                    addr += TARGET_PAGE_SIZE;
1561
                    p++;
1562
                }
1563
            }
1564
        }
1565
    }
1566
#endif
1567
}
1568

    
1569
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, 
1570
                                    unsigned long start)
1571
{
1572
    unsigned long addr;
1573
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1574
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1575
        if (addr == start) {
1576
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
1577
        }
1578
    }
1579
}
1580

    
/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 0xff;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr, 
                 target_phys_addr_t paddr, int prot, 
                 int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    TranslationBlock *first_tb;
    unsigned int index;
    target_ulong address;
    unsigned long addend;
    int ret;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    first_tb = NULL;
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        PageDesc *p1;
        pd = p->phys_offset;
        if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
            /* NOTE: we also allocate the page at this stage */
            p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
            first_tb = p1->first_tb;
        }
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu) 
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else 
                /* XXX: the PowerPC code seems not ready to handle
                   self modifying code with DCBI */
#if defined(TARGET_HAS_SMC) || 1
            if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend;
            } else 
#endif
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && 
                !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || 
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && 
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot, 
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
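
/* Illustrative sketch (assumed caller, not defined in this file): the
   typical user of tlb_set_page() is a target-specific MMU fault handler
   that has already walked the guest page tables and computed 'paddr' and
   'prot' for the faulting virtual address 'vaddr':

       ret = tlb_set_page(env, vaddr & TARGET_PAGE_MASK,
                          paddr & TARGET_PAGE_MASK,
                          prot, is_user, is_softmmu);

   The names of the surrounding handler and its local variables are
   placeholders; only the call shape is fixed by the prototype above. */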

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n", 
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr, 
                 target_phys_addr_t paddr, int prot, 
                 int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start, 
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the page was write protected and becomes writable, we
           invalidate the code inside */
        if (!(p->flags & PAGE_WRITE) && 
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
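
/* Illustrative sketch (assumed caller, not defined in this file): a
   user-mode emulation of the target's mprotect() would typically finish by
   updating the page flags for the affected range, e.g.

       page_set_flags(start, start + len, prot | PAGE_VALID);

   so that translated code in pages that become writable again is
   invalidated before the guest can modify them. */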

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size, 
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr, 
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    unsigned long addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
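
/* Illustrative sketch (hypothetical board setup, not defined in this file):
   a machine init function would typically map guest RAM at offset 0 of
   phys_ram_base, then map a device's registers through an io_index
   previously returned by cpu_register_io_memory():

       cpu_register_physical_memory(0x00000000, ram_size, 0);
       cpu_register_physical_memory(0xfe000000, 0x1000, mydev_io_index);

   'ram_size', the device address and 'mydev_io_index' are placeholders
   supplied by the board code; the low bits of the third argument select
   RAM versus an I/O handler, as described in the comment above. */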

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

/* self-modifying code support in soft MMU mode: writes to a page
   containing code come to these functions */

static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 1);
#endif
    stb_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 0xff;
}

static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2);
#endif
    stw_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 0xff;
}

static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4);
#endif
    stl_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 0xff;
}

static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stb_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stw_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stl_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
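
/* Illustrative sketch (hypothetical device; none of these symbols are
   defined in this file): a device with byte/word/long register accessors
   registers its callback tables, then maps the returned io_index at its
   physical address:

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       io = cpu_register_io_memory(0, mydev_read, mydev_write, mydev_state);
       cpu_register_physical_memory(0xfe000000, 0x1000, io);

   Passing 0 as io_index asks for a fresh entry; the opaque pointer
   ('mydev_state' here) is handed back to every callback. */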

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, 
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* never used */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    return 0;
}

void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, 
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 0xff;
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + 
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + 
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != 0) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + 
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
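
/* Illustrative sketch (hypothetical MMU helper, not defined here): when a
   target's page table walker needs to set an accessed/dirty bit in a guest
   PTE, the pair ldl_phys()/stl_phys_notdirty() lets it rewrite the PTE
   without marking the RAM page dirty or invalidating translated code:

       pte = ldl_phys(pte_addr);
       if (!(pte & PTE_ACCESSED_BIT)) {
           stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED_BIT);
       }

   'pte_addr' and 'PTE_ACCESSED_BIT' are placeholders for target-specific
   values. */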

/* warning: addr must be aligned */
/* XXX: optimize code invalidation test */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != 0) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        /* invalidate code */
        tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
        /* set dirty bit */
        phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 0xff;
    }
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr, 
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK), 
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
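
/* Illustrative sketch (assumed caller such as a debugger stub, not part of
   this file): reading guest memory through the current virtual mappings,
   with the error return handled:

       uint8_t buf[16];
       if (cpu_memory_rw_debug(env, guest_addr, buf, sizeof(buf), 0) < 0) {
           /* the address is not mapped by the guest MMU */
       }

   'guest_addr' is a placeholder for the virtual address being inspected. */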

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count            %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n", 
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n", 
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", 
                cross_page, 
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count, 
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif