Revision 14ce26e7 target-i386/helper2.c

--- a/target-i386/helper2.c
+++ b/target-i386/helper2.c
@@ -77,6 +77,41 @@
         asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
     }
 #endif
+    {
+        int family, model, stepping;
+#ifdef TARGET_X86_64
+        env->cpuid_vendor1 = 0x68747541; /* "Auth" */
+        env->cpuid_vendor2 = 0x69746e65; /* "enti" */
+        env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */
+        family = 6;
+        model = 2;
+        stepping = 3;
+#else
+        env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
+        env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
+        env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
+#if 0
+        /* pentium 75-200 */
+        family = 5;
+        model = 2;
+        stepping = 11;
+#else
+        /* pentium pro */
+        family = 6;
+        model = 1;
+        stepping = 3;
+#endif
+#endif
+        env->cpuid_version = (family << 8) | (model << 4) | stepping;
+        env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
+                               CPUID_TSC | CPUID_MSR | CPUID_MCE |
+                               CPUID_CX8 | CPUID_PGE | CPUID_CMOV);
+#ifdef TARGET_X86_64
+        /* currently not enabled for std i386 because not fully tested */
+        env->cpuid_features |= CPUID_APIC | CPUID_FXSR | CPUID_PAE |
+            CPUID_SSE | CPUID_SSE2;
+#endif
+    }
     cpu_single_env = env;
     cpu_reset(env);
     return env;
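
A minimal illustration (not part of this revision) of how the packed cpuid_version value written above decodes back into family/model/stepping; it assumes the standard CPUID leaf-1 EAX layout and ignores the extended family/model fields:

    #include <stdio.h>

    /* Decode the low 12 bits of CPUID leaf 1 EAX the way a guest would see
     * the value stored in env->cpuid_version above. */
    static void decode_cpuid_version(unsigned int v)
    {
        printf("0x%03x -> family %u, model %u, stepping %u\n",
               v, (v >> 8) & 0xf, (v >> 4) & 0xf, v & 0xf);
    }

    int main(void)
    {
        decode_cpuid_version((6 << 8) | (2 << 4) | 3);  /* x86_64 build: 0x623 */
        decode_cpuid_version((6 << 8) | (1 << 4) | 3);  /* i386 build (pentium pro): 0x613 */
        return 0;
    }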
@@ -107,12 +142,12 @@
     env->tr.limit = 0xffff;
     env->tr.flags = DESC_P_MASK;
 
-    cpu_x86_load_seg_cache(env, R_CS, 0xf000, (uint8_t *)0xffff0000, 0xffff, 0); 
-    cpu_x86_load_seg_cache(env, R_DS, 0, NULL, 0xffff, 0);
-    cpu_x86_load_seg_cache(env, R_ES, 0, NULL, 0xffff, 0);
-    cpu_x86_load_seg_cache(env, R_SS, 0, NULL, 0xffff, 0);
-    cpu_x86_load_seg_cache(env, R_FS, 0, NULL, 0xffff, 0);
-    cpu_x86_load_seg_cache(env, R_GS, 0, NULL, 0xffff, 0);
+    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0); 
+    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
+    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
+    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
+    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
+    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);
 
     env->eip = 0xfff0;
     env->regs[R_EDX] = 0x600; /* indicate P6 processor */
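
As a side note (illustrative only, not part of the diff): with the CS base and EIP set by the reset code in the hunk above, the first instruction fetch lands at the conventional x86 reset vector:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t cs_base = 0xffff0000;  /* base passed for R_CS above */
        uint32_t eip     = 0xfff0;      /* env->eip set just below it */
        /* real-mode style linear address = segment base + offset */
        printf("reset vector = 0x%08x\n", cs_base + eip);  /* prints 0xfffffff0 */
        return 0;
    }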
@@ -136,36 +171,56 @@
 static const char *cc_op_str[] = {
     "DYNAMIC",
     "EFLAGS",
+
     "MULB",
     "MULW",
     "MULL",
+    "MULQ",
+
     "ADDB",
     "ADDW",
     "ADDL",
+    "ADDQ",
+
     "ADCB",
     "ADCW",
     "ADCL",
+    "ADCQ",
+
     "SUBB",
     "SUBW",
     "SUBL",
+    "SUBQ",
+
     "SBBB",
     "SBBW",
     "SBBL",
+    "SBBQ",
+
     "LOGICB",
     "LOGICW",
     "LOGICL",
+    "LOGICQ",
+
     "INCB",
     "INCW",
     "INCL",
+    "INCQ",
+
     "DECB",
     "DECW",
     "DECL",
+    "DECQ",
+
     "SHLB",
     "SHLW",
     "SHLL",
+    "SHLQ",
+
     "SARB",
     "SARW",
     "SARL",
+    "SARQ",
 };
 
 void cpu_dump_state(CPUState *env, FILE *f, 
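
The string table above has to stay in step with the CC_OP_* enumeration that cpu_dump_state() indexes it with (the lookup is guarded by CC_OP_NB further down). A hypothetical compile-time check, not present in this revision, could sit next to the table:

    /* Hypothetical sanity check (not in the revision): fail the build if the
     * table and the enum ever drift apart.  Assumes CC_OP_NB is the final
     * enumerator of the CC_OP_* enum, as used by cpu_dump_state(). */
    typedef char cc_op_str_matches_enum[
        (sizeof(cc_op_str) / sizeof(cc_op_str[0]) == CC_OP_NB) ? 1 : -1];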
@@ -177,55 +232,147 @@
     static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
 
     eflags = env->eflags;
-    cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
-            "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
-            "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c]    CPL=%d II=%d A20=%d\n",
-            env->regs[R_EAX], env->regs[R_EBX], env->regs[R_ECX], env->regs[R_EDX], 
-            env->regs[R_ESI], env->regs[R_EDI], env->regs[R_EBP], env->regs[R_ESP], 
-            env->eip, eflags,
-            eflags & DF_MASK ? 'D' : '-',
-            eflags & CC_O ? 'O' : '-',
-            eflags & CC_S ? 'S' : '-',
-            eflags & CC_Z ? 'Z' : '-',
-            eflags & CC_A ? 'A' : '-',
-            eflags & CC_P ? 'P' : '-',
-            eflags & CC_C ? 'C' : '-',
-            env->hflags & HF_CPL_MASK, 
-            (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
-            (env->a20_mask >> 20) & 1);
-    for(i = 0; i < 6; i++) {
-        SegmentCache *sc = &env->segs[i];
-        cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
-                seg_name[i],
-                sc->selector,
-                (int)sc->base,
-                sc->limit,
-                sc->flags);
+#ifdef TARGET_X86_64
+    if (env->hflags & HF_CS64_MASK) {
+        cpu_fprintf(f, 
+                    "RAX=%016llx RBX=%016llx RCX=%016llx RDX=%016llx\n"
+                    "RSI=%016llx RDI=%016llx RBP=%016llx RSP=%016llx\n"
+                    "R8 =%016llx R9 =%016llx R10=%016llx R11=%016llx\n"
+                    "R12=%016llx R13=%016llx R14=%016llx R15=%016llx\n"
+                    "RIP=%016llx RFL=%08x [%c%c%c%c%c%c%c]    CPL=%d II=%d A20=%d\n",
+                    env->regs[R_EAX], 
+                    env->regs[R_EBX], 
+                    env->regs[R_ECX], 
+                    env->regs[R_EDX], 
+                    env->regs[R_ESI], 
+                    env->regs[R_EDI], 
+                    env->regs[R_EBP], 
+                    env->regs[R_ESP], 
+                    env->regs[8], 
+                    env->regs[9], 
+                    env->regs[10], 
+                    env->regs[11], 
+                    env->regs[12], 
+                    env->regs[13], 
+                    env->regs[14], 
+                    env->regs[15], 
+                    env->eip, eflags,
+                    eflags & DF_MASK ? 'D' : '-',
+                    eflags & CC_O ? 'O' : '-',
+                    eflags & CC_S ? 'S' : '-',
+                    eflags & CC_Z ? 'Z' : '-',
+                    eflags & CC_A ? 'A' : '-',
+                    eflags & CC_P ? 'P' : '-',
+                    eflags & CC_C ? 'C' : '-',
+                    env->hflags & HF_CPL_MASK, 
+                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
+                    (env->a20_mask >> 20) & 1);
+    } else 
+#endif
+    {
+        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
+                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
+                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c]    CPL=%d II=%d A20=%d\n",
+                    (uint32_t)env->regs[R_EAX], 
+                    (uint32_t)env->regs[R_EBX], 
+                    (uint32_t)env->regs[R_ECX], 
+                    (uint32_t)env->regs[R_EDX], 
+                    (uint32_t)env->regs[R_ESI], 
+                    (uint32_t)env->regs[R_EDI], 
+                    (uint32_t)env->regs[R_EBP], 
+                    (uint32_t)env->regs[R_ESP], 
+                    (uint32_t)env->eip, eflags,
+                    eflags & DF_MASK ? 'D' : '-',
+                    eflags & CC_O ? 'O' : '-',
+                    eflags & CC_S ? 'S' : '-',
+                    eflags & CC_Z ? 'Z' : '-',
+                    eflags & CC_A ? 'A' : '-',
+                    eflags & CC_P ? 'P' : '-',
+                    eflags & CC_C ? 'C' : '-',
+                    env->hflags & HF_CPL_MASK, 
+                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
+                    (env->a20_mask >> 20) & 1);
+    }
+
+#ifdef TARGET_X86_64
+    if (env->hflags & HF_LMA_MASK) {
+        for(i = 0; i < 6; i++) {
+            SegmentCache *sc = &env->segs[i];
+            cpu_fprintf(f, "%s =%04x %016llx %08x %08x\n",
+                        seg_name[i],
+                        sc->selector,
+                        sc->base,
+                        sc->limit,
+                        sc->flags);
+        }
+        cpu_fprintf(f, "LDT=%04x %016llx %08x %08x\n",
+                    env->ldt.selector,
+                    env->ldt.base,
+                    env->ldt.limit,
+                    env->ldt.flags);
+        cpu_fprintf(f, "TR =%04x %016llx %08x %08x\n",
+                    env->tr.selector,
+                    env->tr.base,
+                    env->tr.limit,
+                    env->tr.flags);
+        cpu_fprintf(f, "GDT=     %016llx %08x\n",
+                    env->gdt.base, env->gdt.limit);
+        cpu_fprintf(f, "IDT=     %016llx %08x\n",
+                    env->idt.base, env->idt.limit);
+        cpu_fprintf(f, "CR0=%08x CR2=%016llx CR3=%016llx CR4=%08x\n",
+                    (uint32_t)env->cr[0], 
+                    env->cr[2], 
+                    env->cr[3], 
+                    (uint32_t)env->cr[4]);
+    } else
+#endif
+    {
+        for(i = 0; i < 6; i++) {
+            SegmentCache *sc = &env->segs[i];
+            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
+                        seg_name[i],
+                        sc->selector,
+                        (uint32_t)sc->base,
+                        sc->limit,
+                        sc->flags);
+        }
+        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
+                    env->ldt.selector,
+                    (uint32_t)env->ldt.base,
+                    env->ldt.limit,
+                    env->ldt.flags);
+        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
+                    env->tr.selector,
+                    (uint32_t)env->tr.base,
+                    env->tr.limit,
+                    env->tr.flags);
+        cpu_fprintf(f, "GDT=     %08x %08x\n",
+                    (uint32_t)env->gdt.base, env->gdt.limit);
+        cpu_fprintf(f, "IDT=     %08x %08x\n",
+                    (uint32_t)env->idt.base, env->idt.limit);
+        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
+                    (uint32_t)env->cr[0], 
+                    (uint32_t)env->cr[2], 
+                    (uint32_t)env->cr[3], 
+                    (uint32_t)env->cr[4]);
     }
-    cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
-            env->ldt.selector,
-            (int)env->ldt.base,
-            env->ldt.limit,
-            env->ldt.flags);
-    cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
-            env->tr.selector,
-            (int)env->tr.base,
-            env->tr.limit,
-            env->tr.flags);
-    cpu_fprintf(f, "GDT=     %08x %08x\n",
-            (int)env->gdt.base, env->gdt.limit);
-    cpu_fprintf(f, "IDT=     %08x %08x\n",
-            (int)env->idt.base, env->idt.limit);
-    cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
-            env->cr[0], env->cr[2], env->cr[3], env->cr[4]);
-    
     if (flags & X86_DUMP_CCOP) {
         if ((unsigned)env->cc_op < CC_OP_NB)
             snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
         else
             snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
-        cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
-                env->cc_src, env->cc_dst, cc_op_name);
+#ifdef TARGET_X86_64
+        if (env->hflags & HF_CS64_MASK) {
+            cpu_fprintf(f, "CCS=%016llx CCD=%016llx CCO=%-8s\n",
+                        env->cc_src, env->cc_dst, 
+                        cc_op_name);
+        } else 
+#endif
+        {
+            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
+                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst, 
+                        cc_op_name);
+        }
     }
     if (flags & X86_DUMP_FPU) {
         cpu_fprintf(f, "ST0=%f ST1=%f ST2=%f ST3=%f\n", 
@@ -274,6 +421,24 @@
         (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
         tlb_flush(env, 1);
     }
+
+#ifdef TARGET_X86_64
+    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
+        (env->efer & MSR_EFER_LME)) {
+        /* enter in long mode */
+        /* XXX: generate an exception */
+        if (!(env->cr[4] & CR4_PAE_MASK))
+            return;
+        env->efer |= MSR_EFER_LMA;
+        env->hflags |= HF_LMA_MASK;
+    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
+               (env->efer & MSR_EFER_LMA)) {
+        /* exit long mode */
+        env->efer &= ~MSR_EFER_LMA;
+        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
+        env->eip &= 0xffffffff;
+    }
+#endif
     env->cr[0] = new_cr0 | CR0_ET_MASK;
 
     /* update PE flag in hidden flags */
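
For readers skimming the hunk above: LMA is set only when paging is being switched on while EFER.LME is already enabled, and the code silently bails out if PAE is off. A stand-alone restatement of that condition (illustrative only; it uses the standard architectural bit positions instead of QEMU's CR0_PG_MASK / CR4_PAE_MASK / MSR_EFER_LME macros):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed architectural bit positions, not QEMU's macro names. */
    #define PG_BIT   (1u << 31)  /* CR0.PG   */
    #define PAE_BIT  (1u << 5)   /* CR4.PAE  */
    #define LME_BIT  (1u << 8)   /* EFER.LME */

    static bool enters_long_mode(uint32_t old_cr0, uint32_t new_cr0,
                                 uint32_t cr4, uint64_t efer)
    {
        return !(old_cr0 & PG_BIT) &&   /* paging was off...           */
                (new_cr0 & PG_BIT) &&   /* ...and is being switched on */
                (efer & LME_BIT)   &&   /* long mode enabled in EFER   */
                (cr4 & PAE_BIT);        /* PAE required, else no switch */
    }

    int main(void)
    {
        printf("%d\n", enters_long_mode(0, PG_BIT, PAE_BIT, LME_BIT));  /* 1 */
        printf("%d\n", enters_long_mode(0, PG_BIT, 0, LME_BIT));        /* 0: PAE clear */
        return 0;
    }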
@@ -286,12 +451,12 @@
         ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
 }
 
-void cpu_x86_update_cr3(CPUX86State *env, uint32_t new_cr3)
+void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
 {
     env->cr[3] = new_cr3;
     if (env->cr[0] & CR0_PG_MASK) {
 #if defined(DEBUG_MMU)
-        printf("CR3 update: CR3=%08x\n", new_cr3);
+        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
 #endif
         tlb_flush(env, 0);
     }
@@ -300,7 +465,7 @@
 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
 {
 #if defined(DEBUG_MMU)
-    printf("CR4 update: CR4=%08x\n", env->cr[4]);
+    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
 #endif
     if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
         (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
@@ -315,22 +480,51 @@
     tlb_flush_page(env, addr);
 }
 
+static inline uint8_t *get_phys_mem_ptr(target_phys_addr_t addr)
+{
+    /* XXX: incorrect */
+    return phys_ram_base + addr;
+}
+
+/* WARNING: addr must be aligned */
+uint32_t ldl_phys_aligned(target_phys_addr_t addr)
+{
+    uint8_t *ptr;
+    uint32_t val;
+    ptr = get_phys_mem_ptr(addr);
+    if (!ptr)
+        val = 0;
+    else
+        val = ldl_raw(ptr);
+    return val;
+}
+
+void stl_phys_aligned(target_phys_addr_t addr, uint32_t val)
+{
+    uint8_t *ptr;
+    ptr = get_phys_mem_ptr(addr);
+    if (!ptr)
+        return;
+    stl_raw(ptr, val);
+}
+
 /* return value:
    -1 = cannot handle fault 
    0  = nothing more to do 
    1  = generate PF fault
    2  = soft MMU activation required for this block
 */
-int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, 
+int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, 
                              int is_write, int is_user, int is_softmmu)
 {
-    uint8_t *pde_ptr, *pte_ptr;
-    uint32_t pde, pte, virt_addr, ptep;
+    uint32_t pdpe_addr, pde_addr, pte_addr;
+    uint32_t pde, pte, ptep, pdpe;
     int error_code, is_dirty, prot, page_size, ret;
-    unsigned long paddr, vaddr, page_offset;
+    unsigned long paddr, page_offset;
+    target_ulong vaddr, virt_addr;
 
 #if defined(DEBUG_MMU)
-    printf("MMU fault: addr=0x%08x w=%d u=%d eip=%08x\n", 
+    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n", 
           addr, is_write, is_user, env->eip);
 #endif
     is_write &= 1;
@@ -349,90 +543,166 @@
         goto do_mapping;
     }
 
-    /* page directory entry */
-    pde_ptr = phys_ram_base + 
-        (((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask);
-    pde = ldl_raw(pde_ptr);
-    if (!(pde & PG_PRESENT_MASK)) {
-        error_code = 0;
-        goto do_fault;
-    }
-    /* if PSE bit is set, then we use a 4MB page */
-    if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
-        if (is_user) {
-            if (!(pde & PG_USER_MASK))
-                goto do_fault_protect;
-            if (is_write && !(pde & PG_RW_MASK))
-                goto do_fault_protect;
-        } else {
-            if ((env->cr[0] & CR0_WP_MASK) && 
-                is_write && !(pde & PG_RW_MASK)) 
-                goto do_fault_protect;
+
+    if (env->cr[4] & CR4_PAE_MASK) {
+        /* XXX: we only use 32 bit physical addresses */
+#ifdef TARGET_X86_64
+        if (env->hflags & HF_LMA_MASK) {
+            uint32_t pml4e_addr, pml4e;
+            int32_t sext;
+
+            /* XXX: handle user + rw rights */
+            /* XXX: handle NX flag */
+            /* test virtual address sign extension */
+            sext = (int64_t)addr >> 47;
+            if (sext != 0 && sext != -1) {
+                error_code = 0;
+                goto do_fault;
+            }
+
+            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) & 
+                env->a20_mask;
+            pml4e = ldl_phys_aligned(pml4e_addr);
+            if (!(pml4e & PG_PRESENT_MASK)) {
+                error_code = 0;
+                goto do_fault;
+            }
+            if (!(pml4e & PG_ACCESSED_MASK)) {
+                pml4e |= PG_ACCESSED_MASK;
+                stl_phys_aligned(pml4e_addr, pml4e);
+            }
+
+            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) & 
+                env->a20_mask;
+            pdpe = ldl_phys_aligned(pdpe_addr);
+            if (!(pdpe & PG_PRESENT_MASK)) {
+                error_code = 0;
+                goto do_fault;
+            }
+            if (!(pdpe & PG_ACCESSED_MASK)) {
+                pdpe |= PG_ACCESSED_MASK;
+                stl_phys_aligned(pdpe_addr, pdpe);
+            }
+        } else 
+#endif
+        {
+            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) & 
+                env->a20_mask;
+            pdpe = ldl_phys_aligned(pdpe_addr);
+            if (!(pdpe & PG_PRESENT_MASK)) {
+                error_code = 0;
+                goto do_fault;
+            }
         }
-        is_dirty = is_write && !(pde & PG_DIRTY_MASK);
-        if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
-            pde |= PG_ACCESSED_MASK;
-            if (is_dirty)
-                pde |= PG_DIRTY_MASK;
-            stl_raw(pde_ptr, pde);
+
+        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
+            env->a20_mask;
+        pde = ldl_phys_aligned(pde_addr);
+        if (!(pde & PG_PRESENT_MASK)) {
+            error_code = 0;
+            goto do_fault;
         }
-        
-        pte = pde & ~0x003ff000; /* align to 4MB */
-        ptep = pte;
-        page_size = 4096 * 1024;
-        virt_addr = addr & ~0x003fffff;
-    } else {
-        if (!(pde & PG_ACCESSED_MASK)) {
-            pde |= PG_ACCESSED_MASK;
-            stl_raw(pde_ptr, pde);
+        if (pde & PG_PSE_MASK) {
+            /* 2 MB page */
+            page_size = 2048 * 1024;
+            goto handle_big_page;
+        } else {
+            /* 4 KB page */
+            if (!(pde & PG_ACCESSED_MASK)) {
+                pde |= PG_ACCESSED_MASK;
+                stl_phys_aligned(pde_addr, pde);
+            }
+            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
+                env->a20_mask;
+            goto handle_4k_page;
         }
-
+    } else {
         /* page directory entry */
-        pte_ptr = phys_ram_base + 
-            (((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask);
-        pte = ldl_raw(pte_ptr);
-        if (!(pte & PG_PRESENT_MASK)) {
+        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & 
+            env->a20_mask;
+        pde = ldl_phys_aligned(pde_addr);
+        if (!(pde & PG_PRESENT_MASK)) {
             error_code = 0;
             goto do_fault;
         }
-        /* combine pde and pte user and rw protections */
-        ptep = pte & pde;
-        if (is_user) {
-            if (!(ptep & PG_USER_MASK))
-                goto do_fault_protect;
-            if (is_write && !(ptep & PG_RW_MASK))
-                goto do_fault_protect;
+        /* if PSE bit is set, then we use a 4MB page */
+        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
+            page_size = 4096 * 1024;
+        handle_big_page:
+            if (is_user) {
+                if (!(pde & PG_USER_MASK))
+                    goto do_fault_protect;
+                if (is_write && !(pde & PG_RW_MASK))
+                    goto do_fault_protect;
+            } else {
+                if ((env->cr[0] & CR0_WP_MASK) && 
+                    is_write && !(pde & PG_RW_MASK)) 
+                    goto do_fault_protect;
+            }
+            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
+            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
+                pde |= PG_ACCESSED_MASK;
+                if (is_dirty)
+                    pde |= PG_DIRTY_MASK;
+                stl_phys_aligned(pde_addr, pde);
+            }
+
+            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
+            ptep = pte;
+            virt_addr = addr & ~(page_size - 1);
         } else {
-            if ((env->cr[0] & CR0_WP_MASK) &&
-                is_write && !(ptep & PG_RW_MASK)) 
-                goto do_fault_protect;
-        }
-        is_dirty = is_write && !(pte & PG_DIRTY_MASK);
-        if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
-            pte |= PG_ACCESSED_MASK;
-            if (is_dirty)
-                pte |= PG_DIRTY_MASK;
-            stl_raw(pte_ptr, pte);
+            if (!(pde & PG_ACCESSED_MASK)) {
+                pde |= PG_ACCESSED_MASK;
+                stl_phys_aligned(pde_addr, pde);
+            }
+
+            /* page directory entry */
+            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & 
+                env->a20_mask;
+        handle_4k_page:
+            pte = ldl_phys_aligned(pte_addr);
+            if (!(pte & PG_PRESENT_MASK)) {
+                error_code = 0;
+                goto do_fault;
+            }
+            /* combine pde and pte user and rw protections */
+            ptep = pte & pde;
+            if (is_user) {
+                if (!(ptep & PG_USER_MASK))
+                    goto do_fault_protect;
+                if (is_write && !(ptep & PG_RW_MASK))
+                    goto do_fault_protect;
+            } else {
+                if ((env->cr[0] & CR0_WP_MASK) &&
+                    is_write && !(ptep & PG_RW_MASK)) 
+                    goto do_fault_protect;
+            }
+            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
+            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
+                pte |= PG_ACCESSED_MASK;
+                if (is_dirty)
+                    pte |= PG_DIRTY_MASK;
+                stl_phys_aligned(pte_addr, pte);
+            }
+            page_size = 4096;
+            virt_addr = addr & ~0xfff;
         }
-        page_size = 4096;
-        virt_addr = addr & ~0xfff;
-    }
 
-    /* the page can be put in the TLB */
-    prot = PAGE_READ;
-    if (pte & PG_DIRTY_MASK) {
-        /* only set write access if already dirty... otherwise wait
-           for dirty access */
-        if (is_user) {
-            if (ptep & PG_RW_MASK)
-                prot |= PAGE_WRITE;
-        } else {
-            if (!(env->cr[0] & CR0_WP_MASK) ||
-                (ptep & PG_RW_MASK))
-                prot |= PAGE_WRITE;
+        /* the page can be put in the TLB */
+        prot = PAGE_READ;
+        if (pte & PG_DIRTY_MASK) {
+            /* only set write access if already dirty... otherwise wait
+               for dirty access */
+            if (is_user) {
+                if (ptep & PG_RW_MASK)
+                    prot |= PAGE_WRITE;
+            } else {
+                if (!(env->cr[0] & CR0_WP_MASK) ||
+                    (ptep & PG_RW_MASK))
+                    prot |= PAGE_WRITE;
+            }
         }
     }
-
  do_mapping:
     pte = pte & env->a20_mask;
 
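A small stand-alone companion to the PAE/long-mode walk added in the last hunk (illustrative only, not part of the revision): the shift-and-mask expressions above pick out four 9-bit table indices and a 12-bit page offset from a 48-bit virtual address, and the sign-extension test rejects non-canonical addresses.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t addr = 0x00007f1234567abcULL;  /* arbitrary canonical address */

        /* Same index extraction as the new cpu_x86_handle_mmu_fault() code:
         * each level selects one of 512 eight-byte entries. */
        unsigned pml4 = (addr >> 39) & 0x1ff;
        unsigned pdpt = (addr >> 30) & 0x1ff;
        unsigned pd   = (addr >> 21) & 0x1ff;
        unsigned pt   = (addr >> 12) & 0x1ff;
        unsigned off  = addr & 0xfff;

        /* Canonical-address test from the diff: bits 63..47 must all match. */
        int64_t sext = (int64_t)addr >> 47;

        printf("PML4=%u PDPT=%u PD=%u PT=%u offset=0x%03x canonical=%s\n",
               pml4, pdpt, pd, pt, off,
               (sext == 0 || sext == -1) ? "yes" : "no");
        return 0;
    }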