root / exec.c @ 61382a50
/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/mman.h>

#include "config.h"
#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define CODE_GEN_MAX_BLOCKS    (CODE_GEN_BUFFER_SIZE / 64)

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

/* XXX: pack the flags in the low bits of the pointer ? */
typedef struct PageDesc {
    unsigned long flags;
    TranslationBlock *first_tb;
} PageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
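
/* Worked example (editor's illustration, not part of the original source):
   with 4 KB target pages (TARGET_PAGE_BITS == 12) and L2_BITS == 10,
   L1_BITS is 32 - 10 - 12 == 10, so a 32-bit address splits into a 10-bit
   L1 index, a 10-bit L2 index and a 12-bit page offset:

       l1_index = addr >> (L2_BITS + TARGET_PAGE_BITS);
       l2_index = (addr >> TARGET_PAGE_BITS) & (L2_SIZE - 1);

   page_find_alloc() below performs both lookups starting from the page
   index (addr >> TARGET_PAGE_BITS). */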

static void tb_invalidate_page(unsigned long address);
static void io_mem_init(void);

unsigned long real_host_page_size;
unsigned long host_page_bits;
unsigned long host_page_size;
unsigned long host_page_mask;

static PageDesc *l1_map[L1_SIZE];

/* io memory support */
static unsigned long *l1_physmap[L1_SIZE];
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

static void page_init(void)
{
    /* NOTE: we can always suppose that host_page_size >=
       TARGET_PAGE_SIZE */
    real_host_page_size = getpagesize();
    if (host_page_size == 0)
        host_page_size = real_host_page_size;
    if (host_page_size < TARGET_PAGE_SIZE)
        host_page_size = TARGET_PAGE_SIZE;
    host_page_bits = 0;
    while ((1 << host_page_bits) < host_page_size)
        host_page_bits++;
    host_page_mask = ~(host_page_size - 1);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the page becomes writable, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_page(addr);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
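
/* Usage sketch (editor's illustration; the address range is made up): the
   user-mode mmap emulation would declare a freshly mapped guest region
   valid, readable and writable, invalidating any translated code in it:

       page_set_flags(0x40000000, 0x40000000 + 0x10000,
                      PAGE_VALID | PAGE_READ | PAGE_WRITE);
*/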

void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++)
                p[j].first_tb = NULL;
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(void)
{
    int i;
#ifdef DEBUG_FLUSH
    /* guard the average against a division by zero when flushing an
       empty buffer */
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;
    page_flush_tb();
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
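
/* Note (editor's illustration, inferred from the code in this file): the
   jump lists store the jump slot number in the two low bits of each
   TranslationBlock pointer, and the value 2 marks the list head, i.e. the
   owning TB itself (see tb_link() below):

       tagged = (TranslationBlock *)((long)tb | n);      - encode slot n
       n1 = (long)tagged & 3;                            - recover the slot
       tb1 = (TranslationBlock *)((long)tagged & ~3);    - recover the pointer
*/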

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_invalidate(TranslationBlock *tb, int parity)
{
    PageDesc *p;
    unsigned int page_index1, page_index2;
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    tb_remove(&tb_hash[h], tb,
              offsetof(TranslationBlock, hash_next));
    /* remove the TB from the page list */
    page_index1 = tb->pc >> TARGET_PAGE_BITS;
    if ((page_index1 & 1) == parity) {
        p = page_find(page_index1);
        tb_remove(&p->first_tb, tb,
                  offsetof(TranslationBlock, page_next[page_index1 & 1]));
    }
    page_index2 = (tb->pc + tb->size - 1) >> TARGET_PAGE_BITS;
    if ((page_index2 & 1) == parity) {
        p = page_find(page_index2);
        tb_remove(&p->first_tb, tb,
                  offsetof(TranslationBlock, page_next[page_index2 & 1]));
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}

/* invalidate all TBs which intersect with the target page starting at addr */
static void tb_invalidate_page(unsigned long address)
{
    TranslationBlock *tb_next, *tb;
    unsigned int page_index;
    int parity1, parity2;
    PageDesc *p;
#ifdef DEBUG_TB_INVALIDATE
    printf("tb_invalidate_page: %lx\n", address);
#endif

    page_index = address >> TARGET_PAGE_BITS;
    p = page_find(page_index);
    if (!p)
        return;
    tb = p->first_tb;
    parity1 = page_index & 1;
    parity2 = parity1 ^ 1;
    while (tb != NULL) {
        tb_next = tb->page_next[parity1];
        tb_invalidate(tb, parity2);
        tb = tb_next;
    }
    p->first_tb = NULL;
}

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb, unsigned int page_index)
{
    PageDesc *p;
    unsigned long host_start, host_end, addr, page_addr;
    int prot;

    p = page_find_alloc(page_index);
    tb->page_next[page_index & 1] = p->first_tb;
    p->first_tb = tb;
    if (p->flags & PAGE_WRITE) {
        /* force the host page as non-writable (writes will have a
           page fault + mprotect overhead) */
        page_addr = (page_index << TARGET_PAGE_BITS);
        host_start = page_addr & host_page_mask;
        host_end = host_start + host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
#if !defined(CONFIG_SOFTMMU)
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#endif
#if !defined(CONFIG_USER_ONLY)
        /* suppress soft TLB */
        /* XXX: must flush on all processors with the same address space */
        tlb_flush_page_write(cpu_single_env, host_start);
#endif
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    return tb;
}
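
/* Caller-side sketch (editor's illustration, not part of this file): per
   the comment above, the translator is expected to flush and retry when
   allocation fails:

       tb = tb_alloc(pc);
       if (!tb) {
           tb_flush();          - free the whole code buffer
           tb = tb_alloc(pc);   - cannot fail right after a flush
       }
*/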

/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
    unsigned int page_index1, page_index2;

    /* add in the page list */
    page_index1 = tb->pc >> TARGET_PAGE_BITS;
    tb_alloc_page(tb, page_index1);
    page_index2 = (tb->pc + tb->size - 1) >> TARGET_PAGE_BITS;
    if (page_index2 != page_index1) {
        tb_alloc_page(tb, page_index2);
    }
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
#if !defined(CONFIG_SOFTMMU)
            mprotect((void *)host_start, host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
#endif
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_page(address);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr);
    }
}
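
/* Usage sketch (editor's illustration; 'fd', 'guest_buf' and 'count' are
   hypothetical): the syscall emulation layer calls this before letting the
   host kernel write into possibly write-protected guest pages:

       page_unprotect_range(guest_buf, count);
       ret = read(fd, guest_buf, count);
*/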

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
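
/* Usage sketch (editor's illustration; 'host_pc' is hypothetical): given a
   host code pointer, e.g. the PC captured by a signal handler while
   executing generated code, recover the TB that contains it:

       TranslationBlock *tb = tb_find_pc((unsigned long)host_pc);
       if (tb)
           ...   - the fault happened while executing 'tb'
*/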

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb we could have jumped to */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;
    tb_invalidate_page(pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;
    tb_invalidate_page(pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        tb_flush();
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
        setvbuf(logfile, NULL, _IOLBF, 0);
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    tb = env->current_tb;
    if (tb) {
        tb_reset_jump_recursive(tb);
    }
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
#endif
    va_end(ap);
    abort();
}

#ifdef TARGET_I386
/* unmap all mapped pages and flush all associated code */
void page_unmap(void)
{
    PageDesc *pmap;
    int i;

    for(i = 0; i < L1_SIZE; i++) {
        pmap = l1_map[i];
        if (pmap) {
#if !defined(CONFIG_SOFTMMU)
            PageDesc *p;
            unsigned long addr;
            int j, ret, j1;

            p = pmap;
            for(j = 0;j < L2_SIZE;) {
                if (p->flags & PAGE_VALID) {
                    addr = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                    /* we try to find a range to make fewer syscalls */
                    j1 = j;
                    p++;
                    j++;
                    while (j < L2_SIZE && (p->flags & PAGE_VALID)) {
                        p++;
                        j++;
                    }
                    ret = munmap((void *)addr, (j - j1) << TARGET_PAGE_BITS);
                    if (ret != 0) {
                        fprintf(stderr, "Could not unmap page 0x%08lx\n", addr);
                        exit(1);
                    }
                } else {
                    p++;
                    j++;
                }
            }
#endif
            free(pmap);
            l1_map[i] = NULL;
        }
    }
    tb_flush();
}
#endif

void tlb_flush(CPUState *env)
{
#if !defined(CONFIG_USER_ONLY)
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }
#endif
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}

void tlb_flush_page(CPUState *env, uint32_t addr)
{
#if !defined(CONFIG_USER_ONLY)
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);
#endif
}

/* make all writes to page 'addr' trigger a TLB exception to detect
   self modifying code */
void tlb_flush_page_write(CPUState *env, uint32_t addr)
{
#if !defined(CONFIG_USER_ONLY)
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);
#endif
}

static inline unsigned long *physpage_find_alloc(unsigned int page)
{
    unsigned long **lp, *p;
    unsigned int index, i;

    index = page >> TARGET_PAGE_BITS;
    lp = &l1_physmap[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = malloc(sizeof(unsigned long) * L2_SIZE);
        for(i = 0; i < L2_SIZE; i++)
            p[i] = IO_MEM_UNASSIGNED;
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

/* return IO_MEM_UNASSIGNED if no page defined (unused memory) */
unsigned long physpage_find(unsigned long page)
{
    unsigned long *p;
    unsigned int index;
    index = page >> TARGET_PAGE_BITS;
    p = l1_physmap[index >> L2_BITS];
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p[index & (L2_SIZE - 1)];
}

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(unsigned long start_addr, unsigned long size,
                                  long phys_offset)
{
    unsigned long addr, end_addr;
    unsigned long *p;

    end_addr = start_addr + size;
    for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) {
        p = physpage_find_alloc(addr);
        *p = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) == 0)
            phys_offset += TARGET_PAGE_SIZE;
    }
}

static uint32_t unassigned_mem_readb(uint32_t addr)
886
{
887
    return 0;
888
}
889

    
890
static void unassigned_mem_writeb(uint32_t addr, uint32_t val)
891
{
892
}
893

    
894
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
895
    unassigned_mem_readb,
896
    unassigned_mem_readb,
897
    unassigned_mem_readb,
898
};
899

    
900
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
901
    unassigned_mem_writeb,
902
    unassigned_mem_writeb,
903
    unassigned_mem_writeb,
904
};
905

    
906

    
static void io_mem_init(void)
{
    io_mem_nb = 1;
    cpu_register_io_memory(0, unassigned_mem_read, unassigned_mem_write);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write)
{
    int i;

    if (io_index <= 0) {
        /* check that the table still has a free slot before allocating
           a new zone */
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    return io_index << IO_MEM_SHIFT;
}
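
/* Usage sketch (editor's illustration; 'my_mem_read', 'my_mem_write' and
   the addresses are hypothetical): register plain RAM, then allocate a new
   io zone and map one device page at a guest physical address:

       int io;
       cpu_register_physical_memory(0x00000000, 8 * 1024 * 1024, 0);
       io = cpu_register_io_memory(0, my_mem_read, my_mem_write);
       cpu_register_physical_memory(0xf0000000, TARGET_PAGE_SIZE, io);
*/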

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif