Statistics
| Branch: | Revision:

root / target-i386 / helper2.c @ 14ce26e7

History | View | Annotate | Download (25 kB)

1
/*
2
 *  i386 helpers (without register variable usage)
3
 * 
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include <stdarg.h>
21
#include <stdlib.h>
22
#include <stdio.h>
23
#include <string.h>
24
#include <inttypes.h>
25
#include <signal.h>
26
#include <assert.h>
27

    
28
#include "cpu.h"
29
#include "exec-all.h"
30

    
31
//#define DEBUG_MMU
32

    
33
#ifdef USE_CODE_COPY
34
#include <asm/ldt.h>
35
#include <linux/unistd.h>
36
#include <linux/version.h>
37

    
38
_syscall3(int, modify_ldt, int, func, void *, ptr, unsigned long, bytecount)
39

    
40
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
41
#define modify_ldt_ldt_s user_desc
42
#endif
43
#endif /* USE_CODE_COPY */
44

    
45
CPUX86State *cpu_x86_init(void)
46
{
47
    CPUX86State *env;
48
    static int inited;
49

    
50
    cpu_exec_init();
51

    
52
    env = malloc(sizeof(CPUX86State));
53
    if (!env)
54
        return NULL;
55
    memset(env, 0, sizeof(CPUX86State));
56
    /* init various static tables */
57
    if (!inited) {
58
        inited = 1;
59
        optimize_flags_init();
60
    }
61
#ifdef USE_CODE_COPY
62
    /* testing code for code copy case */
63
    {
64
        struct modify_ldt_ldt_s ldt;
65

    
66
        ldt.entry_number = 1;
67
        ldt.base_addr = (unsigned long)env;
68
        ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
69
        ldt.seg_32bit = 1;
70
        ldt.contents = MODIFY_LDT_CONTENTS_DATA;
71
        ldt.read_exec_only = 0;
72
        ldt.limit_in_pages = 1;
73
        ldt.seg_not_present = 0;
74
        ldt.useable = 1;
75
        modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */
76
        
77
        asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
78
    }
79
#endif
80
    {
81
        int family, model, stepping;
82
#ifdef TARGET_X86_64
83
        env->cpuid_vendor1 = 0x68747541; /* "Auth" */
84
        env->cpuid_vendor2 = 0x69746e65; /* "enti" */
85
        env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */
86
        family = 6;
87
        model = 2;
88
        stepping = 3;
89
#else
90
        env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
91
        env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
92
        env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
93
#if 0
94
        /* pentium 75-200 */
95
        family = 5;
96
        model = 2;
97
        stepping = 11;
98
#else
99
        /* pentium pro */
100
        family = 6;
101
        model = 1;
102
        stepping = 3;
103
#endif
104
#endif
105
        env->cpuid_version = (family << 8) | (model << 4) | stepping;
106
        env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
107
                               CPUID_TSC | CPUID_MSR | CPUID_MCE |
108
                               CPUID_CX8 | CPUID_PGE | CPUID_CMOV);
109
#ifdef TARGET_X86_64
110
        /* currently not enabled for std i386 because not fully tested */
111
        env->cpuid_features |= CPUID_APIC | CPUID_FXSR | CPUID_PAE |
112
            CPUID_SSE | CPUID_SSE2;
113
#endif
114
    }
115
    cpu_single_env = env;
116
    cpu_reset(env);
117
    return env;
118
}
119

    
120
/* NOTE: must be called outside the CPU execute loop */
121
/* Bring the CPU back to its architectural power-on state.
   Must be called outside the CPU execute loop (see note above). */
void cpu_reset(CPUX86State *env)
{
    int reg;

    /* wipe everything up to (but not including) the breakpoint state */
    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif

    /* CR0: ET set, paging/protection off */
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = 0xffffffff;

    /* descriptor tables and system segments */
    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK;
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK;

    /* CS base 0xffff0000 combined with eip 0xfff0 below gives the
       classical 0xfffffff0 reset vector */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);

    env->eip = 0xfff0;
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */

    env->eflags = 0x2;

    /* FPU: every register tagged empty, default control word */
    for (reg = 0; reg < 8; reg++) {
        env->fptags[reg] = 1;
    }
    env->fpuc = 0x37f;
}
162

    
163
/* Release a CPU state previously allocated by cpu_x86_init(). */
void cpu_x86_close(CPUX86State *env)
{
    free(env);
}
167

    
168
/***********************************************************/
169
/* x86 debug */
170

    
171
/* Printable names for the lazy condition-code operations.  This table is
   indexed directly by env->cc_op in cpu_dump_state() (range-checked
   against CC_OP_NB), so the entry order must match the CC_OP_*
   enumeration.  Within each operation the four entries are the B/W/L/Q
   operand-size variants. */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
225

    
226
/* Dump the CPU state to 'f' through the 'cpu_fprintf' callback.
   'flags' selects optional sections: X86_DUMP_CCOP adds the lazy
   condition-code state, X86_DUMP_FPU adds the x87 registers.  In
   64-bit code (HF_CS64_MASK) the full 64-bit register file is shown;
   otherwise the 32-bit view is printed. */
void cpu_dump_state(CPUState *env, FILE *f, 
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, 
                    "RAX=%016llx RBX=%016llx RCX=%016llx RDX=%016llx\n"
                    "RSI=%016llx RDI=%016llx RBP=%016llx RSP=%016llx\n"
                    "R8 =%016llx R9 =%016llx R10=%016llx R11=%016llx\n"
                    "R12=%016llx R13=%016llx R14=%016llx R15=%016llx\n"
                    "RIP=%016llx RFL=%08x [%c%c%c%c%c%c%c]    CPL=%d II=%d A20=%d\n",
                    env->regs[R_EAX], 
                    env->regs[R_EBX], 
                    env->regs[R_ECX], 
                    env->regs[R_EDX], 
                    env->regs[R_ESI], 
                    env->regs[R_EDI], 
                    env->regs[R_EBP], 
                    env->regs[R_ESP], 
                    env->regs[8], 
                    env->regs[9], 
                    env->regs[10], 
                    env->regs[11], 
                    env->regs[12], 
                    env->regs[13], 
                    env->regs[14], 
                    env->regs[15], 
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK, 
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1);
    } else 
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c]    CPL=%d II=%d A20=%d\n",
                    (uint32_t)env->regs[R_EAX], 
                    (uint32_t)env->regs[R_EBX], 
                    (uint32_t)env->regs[R_ECX], 
                    (uint32_t)env->regs[R_EDX], 
                    (uint32_t)env->regs[R_ESI], 
                    (uint32_t)env->regs[R_EDI], 
                    (uint32_t)env->regs[R_EBP], 
                    (uint32_t)env->regs[R_ESP], 
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK, 
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1);
    }

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        /* long mode active: segment/system-table bases are 64 bits wide */
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016llx %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016llx %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016llx %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016llx %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016llx %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016llx CR3=%016llx CR4=%08x\n",
                    (uint32_t)env->cr[0], 
                    env->cr[2], 
                    env->cr[3], 
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0], 
                    (uint32_t)env->cr[2], 
                    (uint32_t)env->cr[3], 
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016llx CCD=%016llx CCO=%-8s\n",
                        env->cc_src, env->cc_dst, 
                        cc_op_name);
        } else 
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst, 
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        cpu_fprintf(f, "ST0=%f ST1=%f ST2=%f ST3=%f\n", 
                (double)env->fpregs[0], 
                (double)env->fpregs[1], 
                (double)env->fpregs[2], 
                (double)env->fpregs[3]);
        /* BUGFIX: ST6/ST7 previously printed fpregs[7] and fpregs[8];
           fpregs has 8 entries, so fpregs[8] was an out-of-bounds read
           and ST6 was never shown.  Use indices 6 and 7. */
        cpu_fprintf(f, "ST4=%f ST5=%f ST6=%f ST7=%f\n", 
                (double)env->fpregs[4], 
                (double)env->fpregs[5], 
                (double)env->fpregs[6], 
                (double)env->fpregs[7]);
    }
}
390

    
391
/***********************************************************/
392
/* x86 mmu */
393
/* XXX: add PGE support */
394

    
395
/* Set the A20 gate line.  'a20_state' is treated as a boolean; nothing
   happens when the requested state matches the current one. */
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    int new_state = (a20_state != 0);

    /* bit 20 of a20_mask holds the current gate state */
    if (new_state == ((env->a20_mask >> 20) & 1))
        return;

#if defined(DEBUG_MMU)
    printf("A20 update: a20=%d\n", new_state);
#endif
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

    /* when a20 is changed, all the MMU mappings are invalid, so
       we must flush everything */
    tlb_flush(env, 1);
    env->a20_mask = 0xffefffff | (new_state << 20);
}
412

    
413
/* Install a new CR0 value and keep the derived emulator state (TLB,
   long-mode status, hflags) consistent with it. */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* any change to the paging/write-protect/protection bits invalidates
       all cached translations */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        /* long mode requires PAE; silently refuse the update otherwise */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        /* back to a 32-bit instruction pointer */
        env->eip &= 0xffffffff;
    }
#endif
    /* ET is hardwired to 1 */
    env->cr[0] = new_cr0 | CR0_ET_MASK;
    
    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags: copy MP/EM/TS from CR0 into hflags (the shift
       lines the three CR0 bits up with the three hflags bits) */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
453

    
454
/* Install a new page-directory base (CR3).  When paging is enabled the
   cached translations become stale and the TLB is flushed. */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;

    if (!(env->cr[0] & CR0_PG_MASK))
        return; /* paging disabled: nothing cached to invalidate */

#if defined(DEBUG_MMU)
    printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
    tlb_flush(env, 0);
}
464

    
465
/* Install a new CR4 value; flush the TLB when any paging-related
   feature bit (PGE/PAE/PSE) changes. */
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
    const uint32_t paging_bits = CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK;

#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    /* XOR isolates the changed bits; any change inside paging_bits
       invalidates cached translations */
    if ((new_cr4 ^ env->cr[4]) & paging_bits) {
        tlb_flush(env, 1);
    }
    env->cr[4] = new_cr4;
}
476

    
477
/* XXX: also flush 4MB pages */
478
/* Invalidate the cached translation for the single page containing
   'addr' (see the XXX note above: 4MB pages are not handled). */
void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr)
{
    tlb_flush_page(env, addr);
}
482

    
483
/* Translate a guest physical address into a host pointer by simple
   offsetting into the guest RAM block. */
static inline uint8_t *get_phys_mem_ptr(target_phys_addr_t addr)
{
    /* XXX: incorrect */
    return phys_ram_base + addr;
}
488

    
489
/* WARNING: addr must be aligned */
490
/* Load a 32-bit value from guest physical memory.
   WARNING: addr must be aligned.  Returns 0 when the address does not
   map to host memory. */
uint32_t ldl_phys_aligned(target_phys_addr_t addr)
{
    uint8_t *host_ptr = get_phys_mem_ptr(addr);

    return host_ptr ? ldl_raw(host_ptr) : 0;
}
501

    
502
/* Store a 32-bit value to guest physical memory; silently ignored when
   the address does not map to host memory.  addr must be aligned. */
void stl_phys_aligned(target_phys_addr_t addr, uint32_t val)
{
    uint8_t *host_ptr = get_phys_mem_ptr(addr);

    if (host_ptr != NULL) {
        stl_raw(host_ptr, val);
    }
}
510

    
511
/* return value:
512
   -1 = cannot handle fault 
513
   0  = nothing more to do 
514
   1  = generate PF fault
515
   2  = soft MMU activation required for this block
516
*/
517
/* Resolve a guest virtual address on a TLB miss by walking the guest
   page tables, updating accessed/dirty bits, and installing the
   resulting translation in the TLB.  Handles three paging shapes:
   no paging (identity), PAE/long-mode 2MB+4KB pages, and legacy
   32-bit 4MB+4KB pages.  Return values are documented in the comment
   block above the function. */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, 
                             int is_write, int is_user, int is_softmmu)
{
    uint32_t pdpe_addr, pde_addr, pte_addr;
    uint32_t pde, pte, ptep, pdpe;
    int error_code, is_dirty, prot, page_size, ret;
    unsigned long paddr, page_offset;
    target_ulong vaddr, virt_addr;
    
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n", 
           addr, is_write, is_user, env->eip);
#endif
    is_write &= 1;
    
    if (env->user_mode_only) {
        /* user mode only emulation */
        error_code = 0;
        goto do_fault;
    }

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: identity mapping, always read/write */
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE;
        page_size = 4096;
        goto do_mapping;
    }


    if (env->cr[4] & CR4_PAE_MASK) {
        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            /* 4-level walk: PML4 -> PDPT -> PD -> PT */
            uint32_t pml4e_addr, pml4e;
            int32_t sext;

            /* XXX: handle user + rw rights */
            /* XXX: handle NX flag */
            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                /* non-canonical address */
                error_code = 0;
                goto do_fault;
            }
            
            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) & 
                env->a20_mask;
            pml4e = ldl_phys_aligned(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                /* set the accessed bit in the guest table */
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_aligned(pml4e_addr, pml4e);
            }
            
            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) & 
                env->a20_mask;
            pdpe = ldl_phys_aligned(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_aligned(pdpe_addr, pdpe);
            }
        } else 
#endif
        {
            /* 32-bit PAE: CR3 points at the 4-entry PDPT */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) & 
                env->a20_mask;
            pdpe = ldl_phys_aligned(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys_aligned(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            goto handle_big_page;
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_aligned(pde_addr, pde);
            }
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            goto handle_4k_page;
        }
    } else {
        /* legacy 32-bit (non-PAE) walk */
        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & 
            env->a20_mask;
        pde = ldl_phys_aligned(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
        handle_big_page:
            /* protection checks against the PDE only (reached from the
               PAE path too, with page_size = 2MB) */
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) && 
                    is_write && !(pde & PG_RW_MASK)) 
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_aligned(pde_addr, pde);
            }
        
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_aligned(pde_addr, pde);
            }

            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & 
                env->a20_mask;
        handle_4k_page:
            /* PTE lookup (also entered from the PAE path) */
            pte = ldl_phys_aligned(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) 
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_aligned(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }

        /* the page can be put in the TLB */
        prot = PAGE_READ;
        if (pte & PG_DIRTY_MASK) {
            /* only set write access if already dirty... otherwise wait
               for dirty access */
            if (is_user) {
                if (ptep & PG_RW_MASK)
                    prot |= PAGE_WRITE;
            } else {
                if (!(env->cr[0] & CR0_WP_MASK) ||
                    (ptep & PG_RW_MASK))
                    prot |= PAGE_WRITE;
            }
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;
    
    ret = tlb_set_page(env, vaddr, paddr, prot, is_user, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    /* record the faulting address and build the page-fault error code */
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT) | error_code;
    if (is_user)
        env->error_code |= PG_ERROR_U_MASK;
    return 1;
}
726

    
727
#if defined(CONFIG_USER_ONLY) 
/* Debugger helper: in user-mode-only emulation the guest "physical"
   address space is the host virtual space, so the address maps to
   itself. */
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
#else
/* Debugger helper: translate a guest virtual address to a guest
   physical address by walking the legacy 32-bit (non-PAE) page tables
   without side effects.  Returns -1 if the address is not mapped.
   NOTE(review): unlike cpu_x86_handle_mmu_fault, this walk does not
   handle PAE/long-mode table formats. */
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint8_t *pde_ptr, *pte_ptr;
    uint32_t pde, pte, paddr, page_offset, page_size;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: identity mapping */
        pte = addr;
        page_size = 4096;
    } else {
        /* page directory entry */
        pde_ptr = phys_ram_base + 
            (((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask);
        pde = ldl_raw(pde_ptr);
        if (!(pde & PG_PRESENT_MASK)) 
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            pte = pde & ~0x003ff000; /* align to 4MB */
            page_size = 4096 * 1024;
        } else {
            /* page table entry */
            pte_ptr = phys_ram_base + 
                (((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask);
            pte = ldl_raw(pte_ptr);
            if (!(pte & PG_PRESENT_MASK))
                return -1;
            page_size = 4096;
        }
    }
    pte = pte & env->a20_mask;
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif
767

    
768
#if defined(USE_CODE_COPY)
769
/* In-memory x87 state image used with the fsave/frstor instructions in
   save_native_fp_state()/restore_native_fp_state() below. */
struct fpstate {
    uint16_t fpuc;           /* FPU control word */
    uint16_t dummy1;         /* padding to 32-bit slots */
    uint16_t fpus;           /* FPU status word (TOP field in bits 11-13) */
    uint16_t dummy2;
    uint16_t fptag;          /* tag word, 2 bits per register */
    uint16_t dummy3;

    /* NOTE(review): presumably the last instruction/operand pointers of
       the fsave image; not referenced elsewhere in this file. */
    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10]; /* 8 registers, 10 bytes (80 bits) each */
};
783

    
784
/* Load the emulated x87 state (control/status/tag words and the 8
   registers, rotated so that fpstt becomes the hardware TOP) into the
   host FPU via frstor, and flag that the native registers are live. */
void restore_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;
    
    fp->fpuc = env->fpuc;
    /* rebuild the status word with fpstt in the TOP field (bits 11-13) */
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3; /* mark register empty */
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j], 10);
        j = (j + 1) & 7;
    }
    /* BUGFIX: frstor READS its memory operand, so *fp must be an input
       ("m"), not an output ("=m").  The old output constraint told the
       compiler fp1 was only written by the asm, allowing it to discard
       the initialisation stores above as dead. */
    asm volatile ("frstor %0" : : "m" (*fp));
    env->native_fp_regs = 1;
}
809
 
810
/* Capture the host FPU state via fsave into the emulated CPU state
   (unpacking TOP, tags and the rotated register file), then restore the
   default rounding mode and flag the native registers as stale. */
void save_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    /* BUGFIX: fsave WRITES its memory operand, so *fp must be an output
       ("=m"), not an input.  The old input constraint declared a read
       of the uninitialised fp1 and hid the write from the compiler, so
       the loads below could legally see stale values. */
    asm volatile ("fsave %0" : "=m" (*fp));
    env->fpuc = fp->fpuc;
    /* TOP lives in bits 11-13 of the status word */
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0;i < 8; i++) {
        /* tag value 3 means "empty" */
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j], &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    /* XXX: we do not restore the exception state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
    env->native_fp_regs = 0;
}
836
#endif