Revision 61382a50 target-i386/helper2.c

--- a/target-i386/helper2.c
+++ b/target-i386/helper2.c
@@ -210,7 +210,9 @@
     flags = page_get_flags(addr);
     if (flags & PAGE_VALID) {
         virt_addr = addr & ~0xfff;
+#if !defined(CONFIG_SOFTMMU)
         munmap((void *)virt_addr, 4096);
+#endif
         page_set_flags(virt_addr, virt_addr + 4096, 0);
     }
 }
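Note: in a CONFIG_SOFTMMU build the guest page was never mmap()ed into the host address space, so invalidating it only requires clearing its page flags; the munmap() call is now compiled only into user-space builds. The do_mapping hunk below makes the same split at mapping time.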
@@ -221,16 +223,14 @@
    1  = generate PF fault
    2  = soft MMU activation required for this block
 */
-int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write)
+int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, 
+                             int is_write, int is_user, int is_softmmu)
 {
     uint8_t *pde_ptr, *pte_ptr;
     uint32_t pde, pte, virt_addr;
-    int cpl, error_code, is_dirty, is_user, prot, page_size, ret;
+    int error_code, is_dirty, prot, page_size, ret;
     unsigned long pd;
     
-    cpl = env->hflags & HF_CPL_MASK;
-    is_user = (cpl == 3);
-    
 #ifdef DEBUG_MMU
     printf("MMU fault: addr=0x%08x w=%d u=%d eip=%08x\n", 
            addr, is_write, is_user, env->eip);
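The privilege level is no longer derived inside the handler: is_user becomes a parameter, and is_softmmu lets the caller request a soft-MMU TLB fill explicitly. A minimal sketch of a call site under the new signature (the wrapper function is hypothetical; the CPL test reproduces the two removed lines, and the return codes follow the comment above):

    /* Hypothetical caller: derive is_user from CPL, as the removed
       lines did inside the handler, then dispatch on the result. */
    int handle_fault(CPUX86State *env, uint32_t addr, int is_write)
    {
        int cpl = env->hflags & HF_CPL_MASK;
        int is_user = (cpl == 3);
        int ret = cpu_x86_handle_mmu_fault(env, addr, is_write,
                                           is_user, /* is_softmmu */ 1);
        /* ret == 0: page mapped, retry the access
           ret == 1: raise a #PF exception in the guest
           ret == 2: re-execute the block with the soft MMU enabled */
        return ret;
    }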
@@ -252,7 +252,7 @@
 
     /* page directory entry */
     pde_ptr = phys_ram_base + ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3));
-    pde = ldl(pde_ptr);
+    pde = ldl_raw(pde_ptr);
     if (!(pde & PG_PRESENT_MASK)) {
         error_code = 0;
         goto do_fault;
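The PDE fetch folds the index scaling into one shift: bits 31-22 of the linear address select one of 1024 four-byte directory entries, so ((addr >> 20) & ~3) equals ((addr >> 22) << 2). Worked example with an arbitrary address:

    /* addr = 0x08049123:
     *   addr >> 22        = 0x20   (PDE index, top 10 bits)
     *   (addr >> 22) << 2 = 0x80   (byte offset = index * 4)
     *   (addr >> 20) & ~3 = 0x80   (same offset, one shift fewer) */
    pde_ptr = phys_ram_base + ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3));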
@@ -274,7 +274,7 @@
             pde |= PG_ACCESSED_MASK;
             if (is_dirty)
                 pde |= PG_DIRTY_MASK;
-            stl(pde_ptr, pde);
+            stl_raw(pde_ptr, pde);
         }
         
         pte = pde & ~0x003ff000; /* align to 4MB */
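For a 4MB (PSE) page the walk stops at the directory level and reuses pte to hold the entry: the mask ~0x003ff000 clears bits 21-12 while keeping both the 4MB frame (bits 31-22) and the flag bits, e.g.:

    /* pde = 0x123ff0e7 (4MB page):
     *   pde & ~0x003ff000 = 0x120000e7
     *   frame base (bits 31-22) = 0x12000000, flags kept in bits 11-0 */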
@@ -283,12 +283,12 @@
     } else {
         if (!(pde & PG_ACCESSED_MASK)) {
             pde |= PG_ACCESSED_MASK;
-            stl(pde_ptr, pde);
+            stl_raw(pde_ptr, pde);
         }
 
         /* page directory entry */
         pte_ptr = phys_ram_base + ((pde & ~0xfff) + ((addr >> 10) & 0xffc));
-        pte = ldl(pte_ptr);
+        pte = ldl_raw(pte_ptr);
         if (!(pte & PG_PRESENT_MASK)) {
             error_code = 0;
             goto do_fault;
@@ -308,7 +308,7 @@
             pte |= PG_ACCESSED_MASK;
             if (is_dirty)
                 pte |= PG_DIRTY_MASK;
-            stl(pte_ptr, pte);
+            stl_raw(pte_ptr, pte);
         }
         page_size = 4096;
         virt_addr = addr & ~0xfff;
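All four page-walk accesses move from ldl()/stl() to their _raw counterparts, presumably because the unsuffixed accessors go through the soft-MMU load/store layer once CONFIG_SOFTMMU is in play, while the walk works on host pointers computed from phys_ram_base and must hit guest RAM directly. The accessed/dirty update pattern, condensed:

    pte = ldl_raw(pte_ptr);          /* direct host access, no TLB */
    pte |= PG_ACCESSED_MASK;         /* set on every successful walk */
    if (is_dirty)
        pte |= PG_DIRTY_MASK;        /* writes additionally dirty the page */
    stl_raw(pte_ptr, pte);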
@@ -325,7 +325,10 @@
     }
     
  do_mapping:
-    if (env->hflags & HF_SOFTMMU_MASK) {
+#if !defined(CONFIG_SOFTMMU)
+    if (is_softmmu) 
+#endif
+    {
         unsigned long paddr, vaddr, address, addend, page_offset;
         int index;
 
@@ -352,32 +355,39 @@
             env->tlb_write[is_user][index].address = address;
             env->tlb_write[is_user][index].addend = addend;
         }
+        page_set_flags(vaddr, vaddr + TARGET_PAGE_SIZE, 
+                       PAGE_VALID | PAGE_EXEC | prot);
+        ret = 0;
     }
-    ret = 0;
-    /* XXX: incorrect for 4MB pages */
-    pd = physpage_find(pte & ~0xfff);
-    if ((pd & 0xfff) != 0) {
-        /* IO access: no mapping is done as it will be handled by the
-           soft MMU */
-        if (!(env->hflags & HF_SOFTMMU_MASK))
-            ret = 2;
-    } else {
-        void *map_addr;
-        map_addr = mmap((void *)virt_addr, page_size, prot, 
-                        MAP_SHARED | MAP_FIXED, phys_ram_fd, pd);
-        if (map_addr == MAP_FAILED) {
-            fprintf(stderr, 
-                    "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
-                    pte & ~0xfff, virt_addr);
-            exit(1);
-        }
+#if !defined(CONFIG_SOFTMMU)
+    else {
+        ret = 0;
+        /* XXX: incorrect for 4MB pages */
+        pd = physpage_find(pte & ~0xfff);
+        if ((pd & 0xfff) != 0) {
+            /* IO access: no mapping is done as it will be handled by the
+               soft MMU */
+            if (!(env->hflags & HF_SOFTMMU_MASK))
+                ret = 2;
+        } else {
+            void *map_addr;
+            map_addr = mmap((void *)virt_addr, page_size, prot, 
+                            MAP_SHARED | MAP_FIXED, phys_ram_fd, pd);
+            if (map_addr == MAP_FAILED) {
+                fprintf(stderr, 
+                        "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
+                        pte & ~0xfff, virt_addr);
+                exit(1);
+            }
 #ifdef DEBUG_MMU
-        printf("mmaping 0x%08x to virt 0x%08x pse=%d\n", 
-               pte & ~0xfff, virt_addr, (page_size != 4096));
+            printf("mmaping 0x%08x to virt 0x%08x pse=%d\n", 
+                   pte & ~0xfff, virt_addr, (page_size != 4096));
 #endif
-        page_set_flags(virt_addr, virt_addr + page_size, 
-                       PAGE_VALID | PAGE_EXEC | prot);
+            page_set_flags(virt_addr, virt_addr + page_size, 
+                           PAGE_VALID | PAGE_EXEC | prot);
+        }
     }
+#endif
     return ret;
  do_fault_protect:
     error_code = PG_ERROR_P_MASK;
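do_mapping now has two strategies: with the soft MMU (always, in a CONFIG_SOFTMMU build; otherwise only when is_softmmu is set) the translation is entered into the per-privilege TLB and the page flags are set, with ret = 0 moved inside the block; without it, the guest page is mmap()ed straight from the phys_ram_fd backing file into the host address space, and I/O pages ((pd & 0xfff) != 0) instead return 2 to request soft-MMU activation for the block. A sketch of the elided TLB fill, where the index computation and CPU_TLB_SIZE are assumptions inferred from the visible tlb_write lines:

    /* Sketch, not the verbatim elided code: a direct-mapped TLB
       indexed by virtual page number (CPU_TLB_SIZE assumed to be a
       power of two). */
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->tlb_write[is_user][index].address = address; /* tag + flags */
    env->tlb_write[is_user][index].addend  = addend;  /* host - guest offset */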
