/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/mman.h>

#include "cpu-i386.h"
#include "exec.h"

//#define DEBUG_TB_INVALIDATE
#define DEBUG_FLUSH

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define CODE_GEN_MAX_BLOCKS    (CODE_GEN_BUFFER_SIZE / 64)

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

/* XXX: pack the flags in the low bits of the pointer ? */
typedef struct PageDesc {
    unsigned long flags;
    TranslationBlock *first_tb;
} PageDesc;

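/* the per-page descriptors are kept in a two-level table: a target
   page index is looked up as
       l1_map[index >> L2_BITS][index & (L2_SIZE - 1)]
   so the first level holds L1_SIZE pointers and each second-level
   block holds L2_SIZE PageDesc entries, allocated on demand */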
#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

static void tb_invalidate_page(unsigned long address);

unsigned long real_host_page_size;
unsigned long host_page_bits;
unsigned long host_page_size;
unsigned long host_page_mask;

static PageDesc *l1_map[L1_SIZE];

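/* record the host page size and derive host_page_bits and
   host_page_mask; a non-zero host_page_size set before this call is
   kept, but it is never allowed below TARGET_PAGE_SIZE */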
void page_init(void)
{
    /* NOTE: we can always suppose that host_page_size >=
       TARGET_PAGE_SIZE */
    real_host_page_size = getpagesize();
    if (host_page_size == 0)
        host_page_size = real_host_page_size;
    if (host_page_size < TARGET_PAGE_SIZE)
        host_page_size = TARGET_PAGE_SIZE;
    host_page_bits = 0;
    while ((1 << host_page_bits) < host_page_size)
        host_page_bits++;
    host_page_mask = ~(host_page_size - 1);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

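/* return the PageDesc for a target page index, allocating (and
   zeroing) the second-level table on demand */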
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

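/* same lookup as page_find_alloc(), but return NULL instead of
   allocating when the second-level table does not exist */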
static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

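/* return the flags of the target page containing 'address', or 0 if
   no descriptor has been allocated for it */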
int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_page(addr);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

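/* initialize the code generation buffer pointer (a no-op once it has
   been set) */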
void cpu_x86_tblocks_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
    }
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++)
                p[j].first_tb = NULL;
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(void)
{
    int i;
#ifdef DEBUG_FLUSH
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           (code_gen_ptr - code_gen_buffer) / nb_tbs);
#endif
    nb_tbs = 0;
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;
    page_flush_tb();
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

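/* consistency check: follow the circular jump list of 'tb' and verify
   that it terminates back on 'tb' itself */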
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* walk the circular list of jumps into this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

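/* remove the jump entry 'n' of 'tb' from the circular list of jumps
   into its target TB; the low two bits of each list pointer encode
   the jump index, with the value 2 marking the end of the list */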
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

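/* remove 'tb' from the hash table, from the page lists whose page
   index has the given parity, and from the jump chains; the caller
   clears the page list of the other parity itself (see
   tb_invalidate_page) */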
static inline void tb_invalidate(TranslationBlock *tb, int parity)
{
    PageDesc *p;
    unsigned int page_index1, page_index2;
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    tb_remove(&tb_hash[h], tb,
              offsetof(TranslationBlock, hash_next));
    /* remove the TB from the page list */
    page_index1 = tb->pc >> TARGET_PAGE_BITS;
    if ((page_index1 & 1) == parity) {
        p = page_find(page_index1);
        tb_remove(&p->first_tb, tb,
                  offsetof(TranslationBlock, page_next[page_index1 & 1]));
    }
    page_index2 = (tb->pc + tb->size - 1) >> TARGET_PAGE_BITS;
    if ((page_index2 & 1) == parity) {
        p = page_find(page_index2);
        tb_remove(&p->first_tb, tb,
                  offsetof(TranslationBlock, page_next[page_index2 & 1]));
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}

/* invalidate all TBs which intersect with the target page starting at addr */
static void tb_invalidate_page(unsigned long address)
{
    TranslationBlock *tb_next, *tb;
    unsigned int page_index;
    int parity1, parity2;
    PageDesc *p;
#ifdef DEBUG_TB_INVALIDATE
    printf("tb_invalidate_page: %lx\n", address);
#endif

    page_index = address >> TARGET_PAGE_BITS;
    p = page_find(page_index);
    if (!p)
        return;
    tb = p->first_tb;
    parity1 = page_index & 1;
    parity2 = parity1 ^ 1;
    while (tb != NULL) {
        tb_next = tb->page_next[parity1];
        tb_invalidate(tb, parity2);
        tb = tb_next;
    }
    p->first_tb = NULL;
}

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb, unsigned int page_index)
{
    PageDesc *p;
    unsigned long host_start, host_end, addr, page_addr;
    int prot;

    p = page_find_alloc(page_index);
    tb->page_next[page_index & 1] = p->first_tb;
    p->first_tb = tb;
    if (p->flags & PAGE_WRITE) {
        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr = (page_index << TARGET_PAGE_BITS);
        host_start = page_addr & host_page_mask;
        host_end = host_start + host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
#ifdef DEBUG_TB_CHECK
        tb_page_check();
#endif
    }
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    return tb;
}

/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
    unsigned int page_index1, page_index2;

    /* add in the page list */
    page_index1 = tb->pc >> TARGET_PAGE_BITS;
    tb_alloc_page(tb, page_index1);
    page_index2 = (tb->pc + tb->size - 1) >> TARGET_PAGE_BITS;
    if (page_index2 != page_index1) {
        tb_alloc_page(tb, page_index2);
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) | PAGE_WRITE);
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        p1[pindex].flags |= PAGE_WRITE;
        /* and since the content will be modified, we must invalidate
           the corresponding translated code. */
        tb_invalidate_page(address);
#ifdef DEBUG_TB_CHECK
        tb_invalidate_check(address);
#endif
        return 1;
    } else {
        return 0;
    }
}

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr);
    }
}