/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/mman.h>

#include "config.h"
#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define CODE_GEN_MAX_BLOCKS    (CODE_GEN_BUFFER_SIZE / 64)

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

/* XXX: pack the flags in the low bits of the pointer ? */
typedef struct PageDesc {
    unsigned long flags;
    TranslationBlock *first_tb;
} PageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
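
/* Pages are tracked with a two-level table: the top L1_BITS of a
   target page index select an entry of l1_map[], which points to a
   block of L2_SIZE PageDescs, and the low L2_BITS select the PageDesc
   within that block (see page_find_alloc() below). */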

static void tb_invalidate_page(unsigned long address);
static void io_mem_init(void);

unsigned long real_host_page_size;
unsigned long host_page_bits;
unsigned long host_page_size;
unsigned long host_page_mask;

static PageDesc *l1_map[L1_SIZE];

/* io memory support */
static unsigned long *l1_physmap[L1_SIZE];
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

static void page_init(void)
{
    /* NOTE: we can always assume that host_page_size >=
       TARGET_PAGE_SIZE */
    real_host_page_size = getpagesize();
    if (host_page_size == 0)
        host_page_size = real_host_page_size;
    if (host_page_size < TARGET_PAGE_SIZE)
        host_page_size = TARGET_PAGE_SIZE;
    host_page_bits = 0;
    while ((1 << host_page_bits) < host_page_size)
        host_page_bits++;
    host_page_mask = ~(host_page_size - 1);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return NULL;
    return p + (index & (L2_SIZE - 1));
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the page becomes writable, then we invalidate the code
           inside it */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_page(addr);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
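
/* Illustrative sketch (not compiled): how an emulated mmap() might
   publish a fresh read/write mapping.  The address and length are
   hypothetical; page_set_flags() itself invalidates any code that was
   translated from pages that become writable. */
#if 0
static void example_map_pages(void)
{
    page_set_flags(0x40000000, 0x40000000 + 0x2000,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
}
#endif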

void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++)
                p[j].first_tb = NULL;
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(void)
{
    int i;
#ifdef DEBUG_FLUSH
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           (int)(code_gen_ptr - code_gen_buffer),
           nb_tbs,
           nb_tbs > 0 ? (int)(code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;
    page_flush_tb();
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* walk the circular list of jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
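
/* Chained-jump bookkeeping: the two low bits of the pointers stored in
   jmp_first and jmp_next[] encode which jump slot (0 or 1) of the
   pointed-to TB links back into this list; the value 2 marks the list
   head (the TB itself) and terminates the walk.  This is why the code
   below repeatedly extracts "n1 = (long)tb1 & 3" and then masks the
   pointer with ~3. */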

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
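
/* A TB that straddles a target page boundary is linked into the page
   lists of both pages through page_next[page_index & 1].  The 'parity'
   argument selects which of those two lists tb_invalidate() must
   unlink the TB from; tb_invalidate_page() passes the parity opposite
   to the page it is flushing, since it resets that page's own list
   wholesale afterwards. */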

static inline void tb_invalidate(TranslationBlock *tb, int parity)
{
    PageDesc *p;
    unsigned int page_index1, page_index2;
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    tb_remove(&tb_hash[h], tb,
              offsetof(TranslationBlock, hash_next));
    /* remove the TB from the page list */
    page_index1 = tb->pc >> TARGET_PAGE_BITS;
    if ((page_index1 & 1) == parity) {
        p = page_find(page_index1);
        tb_remove(&p->first_tb, tb,
                  offsetof(TranslationBlock, page_next[page_index1 & 1]));
    }
    page_index2 = (tb->pc + tb->size - 1) >> TARGET_PAGE_BITS;
    if ((page_index2 & 1) == parity) {
        p = page_find(page_index2);
        tb_remove(&p->first_tb, tb,
                  offsetof(TranslationBlock, page_next[page_index2 & 1]));
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}

/* invalidate all TBs which intersect with the target page starting at addr */
static void tb_invalidate_page(unsigned long address)
{
    TranslationBlock *tb_next, *tb;
    unsigned int page_index;
    int parity1, parity2;
    PageDesc *p;
#ifdef DEBUG_TB_INVALIDATE
    printf("tb_invalidate_page: %lx\n", address);
#endif

    page_index = address >> TARGET_PAGE_BITS;
    p = page_find(page_index);
    if (!p)
        return;
    tb = p->first_tb;
    parity1 = page_index & 1;
    parity2 = parity1 ^ 1;
    while (tb != NULL) {
        tb_next = tb->page_next[parity1];
        tb_invalidate(tb, parity2);
        tb = tb_next;
    }
    p->first_tb = NULL;
}

/* add the tb to the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb, unsigned int page_index)
{
    PageDesc *p;
    unsigned long host_start, host_end, addr, page_addr;
    int prot;

    p = page_find_alloc(page_index);
    tb->page_next[page_index & 1] = p->first_tb;
    p->first_tb = tb;
    if (p->flags & PAGE_WRITE) {
        /* force the host page to be non-writable (writes will trigger
           a page fault + mprotect overhead) */
        page_addr = (page_index << TARGET_PAGE_BITS);
        host_start = page_addr & host_page_mask;
        host_end = host_start + host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
#if !defined(CONFIG_SOFTMMU)
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#endif
#if !defined(CONFIG_USER_ONLY)
        /* suppress soft TLB */
        /* XXX: must flush on all processors with the same address space */
        tlb_flush_page_write(cpu_single_env, host_start);
#endif
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
}
465

    
466
/* Allocate a new translation block. Flush the translation buffer if
467
   too many translation blocks or too much generated code. */
468
TranslationBlock *tb_alloc(unsigned long pc)
469
{
470
    TranslationBlock *tb;
471

    
472
    if (nb_tbs >= CODE_GEN_MAX_BLOCKS || 
473
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
474
        return NULL;
475
    tb = &tbs[nb_tbs++];
476
    tb->pc = pc;
477
    return tb;
478
}

/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
    unsigned int page_index1, page_index2;

    /* add in the page list */
    page_index1 = tb->pc >> TARGET_PAGE_BITS;
    tb_alloc_page(tb, page_index1);
    page_index2 = (tb->pc + tb->size - 1) >> TARGET_PAGE_BITS;
    if (page_index2 != page_index1) {
        tb_alloc_page(tb, page_index2);
    }
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}
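
/* Illustrative sketch (not compiled): the allocation protocol expected
   by the two functions above.  The translation step is elided; only
   tb_alloc()/tb_flush()/tb_link() are real. */
#if 0
static TranslationBlock *example_new_tb(unsigned long pc)
{
    TranslationBlock *tb;

    tb = tb_alloc(pc);
    if (!tb) {
        /* buffer full: drop every TB and retry (cannot fail twice) */
        tb_flush();
        tb = tb_alloc(pc);
    }
    /* ... generate code at code_gen_ptr, set tb->size, tb->tc_ptr,
       tb->tb_next_offset[] ... */
    tb_link(tb);
    return tb;
}
#endif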

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
#if !defined(CONFIG_SOFTMMU)
            mprotect((void *)host_start, host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
#endif
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_page(address);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
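
/* Together with tb_alloc_page() above, this implements self-modifying
   code detection for the mprotect()-based setup (!CONFIG_SOFTMMU): the
   host SIGSEGV handler calls page_unprotect() with the faulting
   address, which restores PAGE_WRITE and throws away the now-stale
   translated code. */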

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr);
    }
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
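/* (binary search is valid because TBs are carved sequentially out of
   code_gen_buffer, so the tbs[] array is sorted by tc_ptr) */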
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb to which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;
    tb_invalidate_page(pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;
    tb_invalidate_page(pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        tb_flush();
    }
#endif
}

/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
        setvbuf(logfile, NULL, _IOLBF, 0);
    }
}

    
710
void cpu_set_log_filename(const char *filename)
711
{
712
    logfilename = strdup(filename);
713
}

/* mask must never be zero */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs: chained TBs branch directly
       into each other without rechecking interrupt_request, so breaking
       the links forces control back to the main loop */
    tb = env->current_tb;
    if (tb) {
        tb_reset_jump_recursive(tb);
    }
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
#endif
    va_end(ap);
    abort();
}

#ifdef TARGET_I386
/* unmap all mapped pages and flush all associated code */
void page_unmap(void)
{
    PageDesc *pmap;
    int i;

    for(i = 0; i < L1_SIZE; i++) {
        pmap = l1_map[i];
        if (pmap) {
#if !defined(CONFIG_SOFTMMU)
            PageDesc *p;
            unsigned long addr;
            int j, ret, j1;

            p = pmap;
            for(j = 0; j < L2_SIZE;) {
                if (p->flags & PAGE_VALID) {
                    addr = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                    /* we try to find a range to make fewer syscalls */
                    j1 = j;
                    p++;
                    j++;
                    while (j < L2_SIZE && (p->flags & PAGE_VALID)) {
                        p++;
                        j++;
                    }
                    ret = munmap((void *)addr, (j - j1) << TARGET_PAGE_BITS);
                    if (ret != 0) {
                        fprintf(stderr, "Could not unmap page 0x%08lx\n", addr);
                        exit(1);
                    }
                } else {
                    p++;
                    j++;
                }
            }
#endif
            free(pmap);
            l1_map[i] = NULL;
        }
    }
    tb_flush();
}
#endif

void tlb_flush(CPUState *env)
{
#if !defined(CONFIG_USER_ONLY)
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }
#endif
}
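
/* note: each TLB exists in two banks ([0] and [1]), indexed by the
   translation context (on x86, kernel vs user mode); read and write
   entries are likewise kept in separate tables so that write
   protection falls out of the lookup itself. */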

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}

void tlb_flush_page(CPUState *env, uint32_t addr)
{
#if !defined(CONFIG_USER_ONLY)
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);
#endif
}

/* make all writes to page 'addr' trigger a TLB exception to detect
   self-modifying code */
void tlb_flush_page_write(CPUState *env, uint32_t addr)
{
#if !defined(CONFIG_USER_ONLY)
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);
#endif
}

static inline unsigned long *physpage_find_alloc(unsigned int page)
{
    unsigned long **lp, *p;
    unsigned int index, i;

    index = page >> TARGET_PAGE_BITS;
    lp = &l1_physmap[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = malloc(sizeof(unsigned long) * L2_SIZE);
        for(i = 0; i < L2_SIZE; i++)
            p[i] = IO_MEM_UNASSIGNED;
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

/* return IO_MEM_UNASSIGNED if no page is defined (unused memory) */
unsigned long physpage_find(unsigned long page)
{
    unsigned long *p;
    unsigned int index;
    index = page >> TARGET_PAGE_BITS;
    p = l1_physmap[index >> L2_BITS];
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p[index & (L2_SIZE - 1)];
}
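
/* entries in l1_physmap hold the phys_offset value registered below:
   if the bits below TARGET_PAGE_MASK are zero the page is ordinary RAM
   and the offset advances page by page; otherwise those low bits carry
   an io handler index (see cpu_register_io_memory()). */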

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(unsigned long start_addr, unsigned long size,
                                  long phys_offset)
{
    unsigned long addr, end_addr;
    unsigned long *p;

    end_addr = start_addr + size;
    for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) {
        p = physpage_find_alloc(addr);
        *p = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) == 0)
            phys_offset += TARGET_PAGE_SIZE;
    }
}

static uint32_t unassigned_mem_readb(uint32_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(uint32_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void io_mem_init(void)
{
    io_mem_nb = 1;
    cpu_register_io_memory(0, unassigned_mem_read, unassigned_mem_write);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write)
{
    int i;

    if (io_index <= 0) {
        /* allocate a new io zone; the bound must be checked against the
           allocation counter, not the (non-positive) argument */
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    return io_index << IO_MEM_SHIFT;
}
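
/* Illustrative sketch (not compiled): registering a trivial
   memory-mapped device.  The device callbacks and the region base are
   hypothetical; only cpu_register_io_memory() and
   cpu_register_physical_memory() above are real. */
#if 0
static uint32_t mydev_readb(uint32_t addr) { return 0xff; }
static void mydev_writeb(uint32_t addr, uint32_t val) { }

static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb, mydev_readb, mydev_readb,   /* byte, word, dword */
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb, mydev_writeb, mydev_writeb,
};

static void mydev_init(void)
{
    int iomemtype;

    /* 0 asks for a fresh io zone; the return value already carries the
       index shifted by IO_MEM_SHIFT, ready to use as a phys_offset */
    iomemtype = cpu_register_io_memory(0, mydev_read, mydev_write);
    cpu_register_physical_memory(0xd0000000, 0x1000, iomemtype);
}
#endif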
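
/* Instantiate the soft-MMU accessors from the template once per access
   size: SHIFT is log2 of the access width in bytes (0 = byte, 1 =
   word, 2 = dword, 3 = qword).  The _cmmu suffix marks the variants
   used to fetch guest code for translation, and 'env' is pinned to
   cpu_single_env while the template is expanded. */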

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif