Revision 90f18422

b/exec.c
83 83
    uint32_t phys_offset;
84 84
} PhysPageDesc;
85 85

  
86
/* Note: the VirtPage handling is obsolete and will be suppressed
87
   ASAP */
86 88
typedef struct VirtPageDesc {
87 89
    /* physical address of code page. It is valid only if 'valid_tag'
88 90
       matches 'virt_valid_tag' */ 
......
113 115
PhysPageDesc **l1_phys_map;
114 116

  
115 117
#if !defined(CONFIG_USER_ONLY)
118
#if TARGET_LONG_BITS > 32
119
#define VIRT_L_BITS 9
120
#define VIRT_L_SIZE (1 << VIRT_L_BITS)
121
static void *l1_virt_map[VIRT_L_SIZE];
122
#else
116 123
static VirtPageDesc *l1_virt_map[L1_SIZE];
124
#endif
117 125
static unsigned int virt_valid_tag;
118 126
#endif
119 127

  
......
234 242
static void tlb_protect_code(CPUState *env, target_ulong addr);
235 243
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);
236 244

  
237
static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
245
static VirtPageDesc *virt_page_find_alloc(target_ulong index, int alloc)
238 246
{
239
    VirtPageDesc **lp, *p;
240

  
241
    /* XXX: should not truncate for 64 bit addresses */
242 247
#if TARGET_LONG_BITS > 32
243
    index &= (L1_SIZE - 1);
244
#endif
248
    void **p, **lp;
249

  
250
    p = l1_virt_map;
251
    lp = p + ((index >> (5 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
252
    p = *lp;
253
    if (!p) {
254
        if (!alloc)
255
            return NULL;
256
        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
257
        *lp = p;
258
    }
259
    lp = p + ((index >> (4 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
260
    p = *lp;
261
    if (!p) {
262
        if (!alloc)
263
            return NULL;
264
        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
265
        *lp = p;
266
    }
267
    lp = p + ((index >> (3 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
268
    p = *lp;
269
    if (!p) {
270
        if (!alloc)
271
            return NULL;
272
        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
273
        *lp = p;
274
    }
275
    lp = p + ((index >> (2 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
276
    p = *lp;
277
    if (!p) {
278
        if (!alloc)
279
            return NULL;
280
        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
281
        *lp = p;
282
    }
283
    lp = p + ((index >> (1 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
284
    p = *lp;
285
    if (!p) {
286
        if (!alloc)
287
            return NULL;
288
        p = qemu_mallocz(sizeof(VirtPageDesc) * VIRT_L_SIZE);
289
        *lp = p;
290
    }
291
    return ((VirtPageDesc *)p) + (index & (VIRT_L_SIZE - 1));
292
#else
293
    VirtPageDesc *p, **lp;
294

  
245 295
    lp = &l1_virt_map[index >> L2_BITS];
246 296
    p = *lp;
247 297
    if (!p) {
248 298
        /* allocate if not found */
249
        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
250
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
299
        if (!alloc)
300
            return NULL;
301
        p = qemu_mallocz(sizeof(VirtPageDesc) * L2_SIZE);
251 302
        *lp = p;
252 303
    }
253 304
    return p + (index & (L2_SIZE - 1));
305
#endif
254 306
}
255 307

  
256
static inline VirtPageDesc *virt_page_find(unsigned int index)
308
static inline VirtPageDesc *virt_page_find(target_ulong index)
257 309
{
258
    VirtPageDesc *p;
310
    return virt_page_find_alloc(index, 0);
311
}
259 312

  
260
    p = l1_virt_map[index >> L2_BITS];
261
    if (!p)
262
        return 0;
263
    return p + (index & (L2_SIZE - 1));
313
#if TARGET_LONG_BITS > 32
314
static void virt_page_flush_internal(void **p, int level)
315
{
316
    int i; 
317
    if (level == 0) {
318
        VirtPageDesc *q = (VirtPageDesc *)p;
319
        for(i = 0; i < VIRT_L_SIZE; i++)
320
            q[i].valid_tag = 0;
321
    } else {
322
        level--;
323
        for(i = 0; i < VIRT_L_SIZE; i++) {
324
            if (p[i])
325
                virt_page_flush_internal(p[i], level);
326
        }
327
    }
264 328
}
329
#endif
265 330

  
266 331
static void virt_page_flush(void)
267 332
{
268
    int i, j;
269
    VirtPageDesc *p;
270
    
271 333
    virt_valid_tag++;
272 334

  
273 335
    if (virt_valid_tag == 0) {
274 336
        virt_valid_tag = 1;
275
        for(i = 0; i < L1_SIZE; i++) {
276
            p = l1_virt_map[i];
277
            if (p) {
278
                for(j = 0; j < L2_SIZE; j++)
279
                    p[j].valid_tag = 0;
337
#if TARGET_LONG_BITS > 32
338
        virt_page_flush_internal(l1_virt_map, 5);
339
#else
340
        {
341
            int i, j;
342
            VirtPageDesc *p;
343
            for(i = 0; i < L1_SIZE; i++) {
344
                p = l1_virt_map[i];
345
                if (p) {
346
                    for(j = 0; j < L2_SIZE; j++)
347
                        p[j].valid_tag = 0;
348
                }
280 349
            }
281 350
        }
351
#endif
282 352
    }
283 353
}
284 354
#else
......
945 1015
        
946 1016
        /* save the code memory mappings (needed to invalidate the code) */
947 1017
        addr = tb->pc & TARGET_PAGE_MASK;
948
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
1018
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
949 1019
#ifdef DEBUG_TLB_CHECK 
950 1020
        if (vp->valid_tag == virt_valid_tag &&
951 1021
            vp->phys_addr != tb->page_addr[0]) {
......
963 1033
        
964 1034
        if (tb->page_addr[1] != -1) {
965 1035
            addr += TARGET_PAGE_SIZE;
966
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
1036
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
967 1037
#ifdef DEBUG_TLB_CHECK 
968 1038
            if (vp->valid_tag == virt_valid_tag &&
969 1039
                vp->phys_addr != tb->page_addr[1]) { 
......
1572 1642
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1573 1643
        }
1574 1644
        
1575
        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
1645
        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1576 1646
        addend -= vaddr;
1577 1647
        if (prot & PAGE_READ) {
1578 1648
            env->tlb_read[is_user][index].address = address;
......
1635 1705
                           original mapping */
1636 1706
                        VirtPageDesc *vp;
1637 1707
                        
1638
                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
1708
                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1639 1709
                        vp->phys_addr = pd;
1640 1710
                        vp->prot = prot;
1641 1711
                        vp->valid_tag = virt_valid_tag;

Also available in: Unified diff