exec.c @ facc68be
/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/mman.h>

#include "config.h"
#ifdef TARGET_I386
#include "cpu-i386.h"
#endif
#ifdef TARGET_ARM
#include "cpu-arm.h"
#endif
#include "exec.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define CODE_GEN_MAX_BLOCKS    (CODE_GEN_BUFFER_SIZE / 64)

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

/* XXX: pack the flags in the low bits of the pointer ? */
typedef struct PageDesc {
    unsigned long flags;
    TranslationBlock *first_tb;
} PageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
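
/* The page descriptor table is a lazily allocated two-level radix
   tree: a 32-bit target address splits into an L1_BITS index into
   l1_map[], an L2_BITS index into the PageDesc array that entry
   points to, and TARGET_PAGE_BITS of page offset.  For example, with
   4 KB target pages (TARGET_PAGE_BITS == 12) this is a 10/10/12
   split, so each L2 array of 1024 PageDesc entries covers 4 MB of
   address space and is only allocated once a page in that range is
   actually used. */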

static void tb_invalidate_page(unsigned long address);
static void io_mem_init(void);

unsigned long real_host_page_size;
unsigned long host_page_bits;
unsigned long host_page_size;
unsigned long host_page_mask;

static PageDesc *l1_map[L1_SIZE];

/* io memory support */
static unsigned long *l1_physmap[L1_SIZE];
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
static int io_mem_nb;

static void page_init(void)
{
    /* NOTE: we can always assume that host_page_size >=
       TARGET_PAGE_SIZE */
    real_host_page_size = getpagesize();
    if (host_page_size == 0)
        host_page_size = real_host_page_size;
    if (host_page_size < TARGET_PAGE_SIZE)
        host_page_size = TARGET_PAGE_SIZE;
    host_page_bits = 0;
    while ((1 << host_page_bits) < host_page_size)
        host_page_bits++;
    host_page_mask = ~(host_page_size - 1);
}
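
/* e.g. on a host with 4 KB pages and host_page_size left at 0,
   page_init() yields host_page_size = 4096, host_page_bits = 12 and
   host_page_mask = 0xfffff000 */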

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    /* one extra iteration (i == L1_SIZE) so that the last mapping is
       flushed */
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return NULL;
    return p + (index & (L2_SIZE - 1));
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the page becomes writable again, the translated code it
           contains must be invalidated */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_page(addr);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
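
/* e.g. (illustrative only, not a call made in this file) a user mode
   mmap() emulation would typically publish a new writable anonymous
   mapping with:
   page_set_flags(start, start + len,
                  PAGE_VALID | PAGE_READ | PAGE_WRITE); */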

void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++)
                p[j].first_tb = NULL;
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(void)
{
    int i;
#ifdef DEBUG_FLUSH
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           (code_gen_ptr - code_gen_buffer) / nb_tbs);
#endif
    nb_tbs = 0;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;
    page_flush_tb();
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* walk the circular list of jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB: unlink 'tb' from a singly linked TB list whose
   next pointer is located 'next_offset' bytes inside each element */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
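
/* Each TB has two outgoing jump slots (0 and 1).  All TBs jumping to
   a given TB are kept in a circular list rooted at that TB's
   jmp_first field: the low 2 bits of each pointer in the list encode
   which jump slot of the pointed-to TB is involved, and the special
   tag value 2 marks the list head (the destination TB itself).  This
   is why the loops below mask pointers with ~3 and stop on n1 == 2. */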

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
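
/* A TB can straddle two target pages, so it may need to sit on two
   page lists at once.  The page_next[] link is indexed by the parity
   (bit 0) of the page index; since the two pages covering a TB are
   adjacent, they always have different parities and therefore use
   different link fields.  tb_invalidate() only unlinks the TB from
   the lists whose page index matches 'parity': the caller clears the
   other page's list wholesale (see tb_invalidate_page). */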

static inline void tb_invalidate(TranslationBlock *tb, int parity)
{
    PageDesc *p;
    unsigned int page_index1, page_index2;
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    tb_remove(&tb_hash[h], tb,
              offsetof(TranslationBlock, hash_next));
    /* remove the TB from the page list */
    page_index1 = tb->pc >> TARGET_PAGE_BITS;
    if ((page_index1 & 1) == parity) {
        p = page_find(page_index1);
        tb_remove(&p->first_tb, tb,
                  offsetof(TranslationBlock, page_next[page_index1 & 1]));
    }
    page_index2 = (tb->pc + tb->size - 1) >> TARGET_PAGE_BITS;
    if ((page_index2 & 1) == parity) {
        p = page_find(page_index2);
        tb_remove(&p->first_tb, tb,
                  offsetof(TranslationBlock, page_next[page_index2 & 1]));
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}

/* invalidate all TBs which intersect with the target page starting at
   'address' */
static void tb_invalidate_page(unsigned long address)
{
    TranslationBlock *tb_next, *tb;
    unsigned int page_index;
    int parity1, parity2;
    PageDesc *p;
#ifdef DEBUG_TB_INVALIDATE
    printf("tb_invalidate_page: %lx\n", address);
#endif

    page_index = address >> TARGET_PAGE_BITS;
    p = page_find(page_index);
    if (!p)
        return;
    tb = p->first_tb;
    parity1 = page_index & 1;
    parity2 = parity1 ^ 1;
    while (tb != NULL) {
        tb_next = tb->page_next[parity1];
        tb_invalidate(tb, parity2);
        tb = tb_next;
    }
    p->first_tb = NULL;
}

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb, unsigned int page_index)
{
    PageDesc *p;
    unsigned long host_start, host_end, addr, page_addr;
    int prot;

    p = page_find_alloc(page_index);
    tb->page_next[page_index & 1] = p->first_tb;
    p->first_tb = tb;
    if (p->flags & PAGE_WRITE) {
        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr = (page_index << TARGET_PAGE_BITS);
        host_start = page_addr & host_page_mask;
        host_end = host_start + host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
#ifdef DEBUG_TB_CHECK
        tb_page_check();
#endif
    }
}
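
/* Self-modifying code is caught through the page protection set up
   above: once a page contains translated code it is made read-only,
   so a target write to it raises a host segmentation fault.  The
   signal handler then calls page_unprotect() below, which restores
   PAGE_WRITE and throws away the now stale translations. */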

/* Allocate a new translation block. Return NULL if there are too
   many translation blocks or too much generated code: the caller
   must then flush the translation buffer. */
TranslationBlock *tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    return tb;
}
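
/* A minimal sketch of the expected caller pattern (illustrative only;
   the real caller lives in the translator, and 'tb_alloc_retry' is a
   hypothetical name): on allocation failure the whole code cache is
   flushed, after which allocation cannot fail. */
#if 0
static TranslationBlock *tb_alloc_retry(unsigned long pc)
{
    TranslationBlock *tb;

    tb = tb_alloc(pc);
    if (!tb) {
        /* no room left: drop every cached translation and retry */
        tb_flush();
        tb = tb_alloc(pc);
    }
    return tb;
}
#endif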

/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
    unsigned int page_index1, page_index2;

    /* add in the page list */
    page_index1 = tb->pc >> TARGET_PAGE_BITS;
    tb_alloc_page(tb, page_index1);
    page_index2 = (tb->pc + tb->size - 1) >> TARGET_PAGE_BITS;
    if (page_index2 != page_index1) {
        tb_alloc_page(tb, page_index2);
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) | PAGE_WRITE);
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        p1[pindex].flags |= PAGE_WRITE;
        /* and since the content will be modified, we must invalidate
           the corresponding translated code. */
        tb_invalidate_page(address);
#ifdef DEBUG_TB_CHECK
        tb_invalidate_check(address);
#endif
        return 1;
    } else {
        return 0;
    }
}

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr);
    }
}

/* find the TB containing the host code address 'tc_ptr', i.e. the tb
   such that tb->tc_ptr <= tc_ptr < (tb + 1)->tc_ptr. Return NULL if
   not found. The binary search is valid because tb_alloc() hands out
   tbs[] entries in order of increasing tc_ptr. */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* recursively suppress the jumps in the TB we jumped to */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;
    tb_invalidate_page(pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;
    tb_invalidate_page(pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        tb_flush();
    }
#endif
}

/* mask must never be zero */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    tb = env->current_tb;
    if (tb) {
        tb_reset_jump_recursive(tb);
    }
}
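
/* Resetting the chained jumps does not stop the current TB, but it
   guarantees that execution falls back to the main CPU loop at the
   next block boundary instead of following a direct TB-to-TB jump;
   the loop can then notice interrupt_request. */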

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
#endif
    va_end(ap);
    abort();
}

#ifdef TARGET_I386
/* unmap all mapped pages and flush all associated code */
void page_unmap(void)
{
    PageDesc *p, *pmap;
    unsigned long addr;
    int i, j, ret, j1;

    for(i = 0; i < L1_SIZE; i++) {
        pmap = l1_map[i];
        if (pmap) {
            p = pmap;
            for(j = 0; j < L2_SIZE;) {
                if (p->flags & PAGE_VALID) {
                    addr = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                    /* coalesce contiguous valid pages to issue fewer
                       syscalls */
                    j1 = j;
                    p++;
                    j++;
                    while (j < L2_SIZE && (p->flags & PAGE_VALID)) {
                        p++;
                        j++;
                    }
                    ret = munmap((void *)addr, (j - j1) << TARGET_PAGE_BITS);
                    if (ret != 0) {
                        fprintf(stderr, "Could not unmap page 0x%08lx\n", addr);
                        exit(1);
                    }
                } else {
                    p++;
                    j++;
                }
            }
            free(pmap);
            l1_map[i] = NULL;
        }
    }
    tb_flush();
}
#endif

void tlb_flush(CPUState *env)
{
#if defined(TARGET_I386)
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }
#endif
}

void tlb_flush_page(CPUState *env, uint32_t addr)
{
#if defined(TARGET_I386)
    int i;

    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->tlb_read[0][i].address = -1;
    env->tlb_write[0][i].address = -1;
    env->tlb_read[1][i].address = -1;
    env->tlb_write[1][i].address = -1;
#endif
}

static inline unsigned long *physpage_find_alloc(unsigned int page)
{
    unsigned long **lp, *p;
    unsigned int index, i;

    index = page >> TARGET_PAGE_BITS;
    lp = &l1_physmap[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = malloc(sizeof(unsigned long) * L2_SIZE);
        for(i = 0; i < L2_SIZE; i++)
            p[i] = IO_MEM_UNASSIGNED;
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

/* return IO_MEM_UNASSIGNED if no page is defined (unused memory) */
unsigned long physpage_find(unsigned long page)
{
    unsigned long *p;
    unsigned int index;
    index = page >> TARGET_PAGE_BITS;
    p = l1_physmap[index >> L2_BITS];
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p[index & (L2_SIZE - 1)];
}

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(unsigned long start_addr, unsigned long size,
                                  long phys_offset)
{
    unsigned long addr, end_addr;
    unsigned long *p;

    end_addr = start_addr + size;
    for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) {
        p = physpage_find_alloc(addr);
        *p = phys_offset;
        /* RAM pages store a running offset; io pages all keep the
           same tagged value */
        if ((phys_offset & ~TARGET_PAGE_MASK) == 0)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
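
/* e.g. (illustrative only) a machine with 32 MB of guest RAM starting
   at guest physical address 0 would be registered as:
   cpu_register_physical_memory(0, 32 * 1024 * 1024, 0); */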

static uint32_t unassigned_mem_readb(uint32_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(uint32_t addr, uint32_t val)
{
}

/* the same dummy handler serves byte, word and dword accesses */
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void io_mem_init(void)
{
    /* io_mem_nb starts at 1: zone number 0 is skipped because, once
       shifted by IO_MEM_SHIFT, it would be indistinguishable from a
       RAM phys_offset; the unassigned handlers thus get zone 1 */
    io_mem_nb = 1;
    cpu_register_io_memory(0, unassigned_mem_read, unassigned_mem_write);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned in case of error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write)
{
    int i;

    if (io_index <= 0) {
        /* allocate a new io zone; fail if the table is full */
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    return io_index << IO_MEM_SHIFT;
}
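
/* A minimal usage sketch (illustrative only; the 'my_dev' names below
   are hypothetical and not part of QEMU): register byte/word/dword
   handlers for a device, then map one target page of guest physical
   address space onto them. */
#if 0
static uint32_t my_dev_readb(uint32_t addr)
{
    return 0xff; /* would read a device register */
}

static void my_dev_writeb(uint32_t addr, uint32_t val)
{
    /* would write a device register */
}

/* the same handler is reused for 8/16/32 bit accesses for brevity */
static CPUReadMemoryFunc *my_dev_read[3] = {
    my_dev_readb, my_dev_readb, my_dev_readb,
};
static CPUWriteMemoryFunc *my_dev_write[3] = {
    my_dev_writeb, my_dev_writeb, my_dev_writeb,
};

static void my_dev_init(unsigned long base)
{
    int io;

    io = cpu_register_io_memory(0, my_dev_read, my_dev_write);
    if (io < 0)
        return;
    /* the low bits of 'io' are non zero, marking the page as io */
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}
#endif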