Statistics
| Branch: | Revision:

root / target-i386 / helper2.c @ d3c61721

History | View | Annotate | Download (25.2 kB)

1
/*
2
 *  i386 helpers (without register variable usage)
3
 * 
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include <stdarg.h>
21
#include <stdlib.h>
22
#include <stdio.h>
23
#include <string.h>
24
#include <inttypes.h>
25
#include <signal.h>
26
#include <assert.h>
27

    
28
#include "cpu.h"
29
#include "exec-all.h"
30

    
31
//#define DEBUG_MMU
32

    
33
#ifdef USE_CODE_COPY
34
#include <asm/ldt.h>
35
#include <linux/unistd.h>
36
#include <linux/version.h>
37

    
38
_syscall3(int, modify_ldt, int, func, void *, ptr, unsigned long, bytecount)
39

    
40
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
41
#define modify_ldt_ldt_s user_desc
42
#endif
43
#endif /* USE_CODE_COPY */
44

    
45
/* Allocate and initialise a new x86 CPU state.
   Returns the new state, or NULL if allocation fails.  The first call
   also initialises the static flag-optimisation tables and, under
   USE_CODE_COPY, maps the CPU state through %fs via an LDT entry. */
CPUX86State *cpu_x86_init(void)
{
    CPUX86State *env;
    static int inited;

    cpu_exec_init();

    env = malloc(sizeof(CPUX86State));
    if (!env)
        return NULL;
    memset(env, 0, sizeof(CPUX86State));
    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
#ifdef USE_CODE_COPY
    /* testing code for code copy case */
    {
        struct modify_ldt_ldt_s ldt;

        /* install an LDT data segment covering the CPU state so that
           translated code can reach it through %fs */
        ldt.entry_number = 1;
        ldt.base_addr = (unsigned long)env;
        ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
        ldt.seg_32bit = 1;
        ldt.contents = MODIFY_LDT_CONTENTS_DATA;
        ldt.read_exec_only = 0;
        ldt.limit_in_pages = 1;
        ldt.seg_not_present = 0;
        ldt.useable = 1;
        modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */
        
        /* load %fs with selector: LDT entry 1, table indicator + RPL 3 */
        asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
    }
#endif
    {
        int family, model, stepping;
#ifdef TARGET_X86_64
        env->cpuid_vendor1 = 0x68747541; /* "Auth" */
        env->cpuid_vendor2 = 0x69746e65; /* "enti" */
        env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */
        family = 6;
        model = 2;
        stepping = 3;
#else
        env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
        env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
        env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
#if 0
        /* pentium 75-200 */
        family = 5;
        model = 2;
        stepping = 11;
#else
        /* pentium pro */
        family = 6;
        model = 1;
        stepping = 3;
#endif
#endif
        /* CPUID leaf 1 EAX layout: family in bits 8-11, model 4-7,
           stepping 0-3 */
        env->cpuid_version = (family << 8) | (model << 4) | stepping;
        env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
                               CPUID_TSC | CPUID_MSR | CPUID_MCE |
                               CPUID_CX8 | CPUID_PGE | CPUID_CMOV);
#ifdef TARGET_X86_64
        /* currently not enabled for std i386 because not fully tested */
        env->cpuid_features |= CPUID_APIC | CPUID_FXSR | CPUID_PAE |
            CPUID_SSE | CPUID_SSE2;
#endif
    }
    cpu_single_env = env;
    cpu_reset(env);
    return env;
}
119

    
120
/* NOTE: must be called outside the CPU execute loop */
121
/* Bring the CPU back to its power-on/reset state: real mode, reset
   vector at F000:FFF0, empty FPU stack.  NOTE: must be called outside
   the CPU execute loop. */
void cpu_reset(CPUX86State *env)
{
    int reg;

    /* clear everything up to (but excluding) the breakpoint state */
    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = 0xffffffff;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK;
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK;

    /* CS points at the reset vector; the other segments start flat */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);

    env->eip = 0xfff0;
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */

    env->eflags = 0x2;

    /* FPU init: mark every stack slot empty, default control word */
    for (reg = 0; reg < 8; reg++) {
        env->fptags[reg] = 1;
    }
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;
}
164

    
165
/* Release a CPU state previously allocated by cpu_x86_init(). */
void cpu_x86_close(CPUX86State *env)
{
    free(env);
}
169

    
170
/***********************************************************/
171
/* x86 debug */
172

    
173
/* Printable names for the lazy condition-code operations used by
   cpu_dump_state().  NOTE: the order must match the CC_OP_* enum in
   cpu.h (DYNAMIC, EFLAGS, then B/W/L/Q variants of each op). */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
227

    
228
/* Dump the CPU state to 'f' via 'cpu_fprintf'.  'flags' selects the
   optional sections: X86_DUMP_CCOP adds the lazy condition-code state,
   X86_DUMP_FPU adds the x87 stack registers.  Register width follows
   the current CPU mode (64-bit dump only when CS is 64-bit / LMA). */
void cpu_dump_state(CPUState *env, FILE *f, 
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, 
                    "RAX=%016llx RBX=%016llx RCX=%016llx RDX=%016llx\n"
                    "RSI=%016llx RDI=%016llx RBP=%016llx RSP=%016llx\n"
                    "R8 =%016llx R9 =%016llx R10=%016llx R11=%016llx\n"
                    "R12=%016llx R13=%016llx R14=%016llx R15=%016llx\n"
                    "RIP=%016llx RFL=%08x [%c%c%c%c%c%c%c]    CPL=%d II=%d A20=%d\n",
                    env->regs[R_EAX], 
                    env->regs[R_EBX], 
                    env->regs[R_ECX], 
                    env->regs[R_EDX], 
                    env->regs[R_ESI], 
                    env->regs[R_EDI], 
                    env->regs[R_EBP], 
                    env->regs[R_ESP], 
                    env->regs[8], 
                    env->regs[9], 
                    env->regs[10], 
                    env->regs[11], 
                    env->regs[12], 
                    env->regs[13], 
                    env->regs[14], 
                    env->regs[15], 
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK, 
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1);
    } else 
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c]    CPL=%d II=%d A20=%d\n",
                    (uint32_t)env->regs[R_EAX], 
                    (uint32_t)env->regs[R_EBX], 
                    (uint32_t)env->regs[R_ECX], 
                    (uint32_t)env->regs[R_EDX], 
                    (uint32_t)env->regs[R_ESI], 
                    (uint32_t)env->regs[R_EDI], 
                    (uint32_t)env->regs[R_EBP], 
                    (uint32_t)env->regs[R_ESP], 
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK, 
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1);
    }

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        /* long mode: 64-bit segment bases and system table bases */
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016llx %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016llx %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016llx %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016llx %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016llx %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016llx CR3=%016llx CR4=%08x\n",
                    (uint32_t)env->cr[0], 
                    env->cr[2], 
                    env->cr[3], 
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0], 
                    (uint32_t)env->cr[2], 
                    (uint32_t)env->cr[3], 
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016llx CCD=%016llx CCO=%-8s\n",
                        env->cc_src, env->cc_dst, 
                        cc_op_name);
        } else 
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst, 
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        cpu_fprintf(f, "ST0=%f ST1=%f ST2=%f ST3=%f\n", 
                (double)env->fpregs[0].d, 
                (double)env->fpregs[1].d, 
                (double)env->fpregs[2].d, 
                (double)env->fpregs[3].d);
        /* bug fix: ST6/ST7 previously read fpregs[7] and fpregs[8];
           fpregs[8] is out of bounds for the 8-entry register file and
           ST6 printed the wrong register */
        cpu_fprintf(f, "ST4=%f ST5=%f ST6=%f ST7=%f\n", 
                (double)env->fpregs[4].d, 
                (double)env->fpregs[5].d, 
                (double)env->fpregs[6].d, 
                (double)env->fpregs[7].d);
    }
}
392

    
393
/***********************************************************/
394
/* x86 mmu */
395
/* XXX: add PGE support */
396

    
397
/* Set the state of the A20 address line (0 = masked, non-zero =
   enabled).  A change invalidates all cached translations. */
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    int new_state = (a20_state != 0);

    /* no work if the line is already in the requested state */
    if (new_state == ((env->a20_mask >> 20) & 1))
        return;
#if defined(DEBUG_MMU)
    printf("A20 update: a20=%d\n", new_state);
#endif
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

    /* when a20 is changed, all the MMU mappings are invalid, so
       we must flush everything */
    tlb_flush(env, 1);
    env->a20_mask = 0xffefffff | (new_state << 20);
}
414

    
415
/* Install a new CR0 value.  Flushes the TLB when the paging/protection
   bits change, handles long-mode entry/exit on x86-64, and keeps the
   derived hidden flags (PE, ADDSEG, MP/EM/TS) in sync. */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* any change of PG, WP or PE invalidates all cached translations */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK; /* ET bit is forced to 1 */
    
    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
455

    
456
/* Install a new page-table base (CR3).  When paging is enabled this
   flushes the non-global TLB entries. */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (!(env->cr[0] & CR0_PG_MASK))
        return;
#if defined(DEBUG_MMU)
    printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
    tlb_flush(env, 0);
}
466

    
467
/* Install a new CR4 value.  Flushes the TLB when a paging-related bit
   (PGE/PAE/PSE) changes, and keeps the OSFXSR hidden flag in sync for
   SSE instruction gating. */
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    /* bug fix: previously logged the stale env->cr[4] instead of the
       incoming value being installed */
    printf("CR4 update: CR4=%08x\n", new_cr4);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling: OSFXSR is only honoured if the CPU reports SSE */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}
486

    
487
/* XXX: also flush 4MB pages */
488
/* Flush the TLB entry covering the given virtual address (invlpg). */
void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr)
{
    tlb_flush_page(env, addr);
}
492

    
493
/* Translate a guest physical address into a host pointer into the
   emulated RAM block.  No bounds or MMIO handling is performed. */
static inline uint8_t *get_phys_mem_ptr(target_phys_addr_t addr)
{
    /* XXX: incorrect */
    return phys_ram_base + addr;
}
498

    
499
/* WARNING: addr must be aligned */
500
/* Load a 32-bit word from guest physical memory; returns 0 when the
   address cannot be mapped to host memory.  'addr' must be 4-byte
   aligned. */
uint32_t ldl_phys_aligned(target_phys_addr_t addr)
{
    uint8_t *p = get_phys_mem_ptr(addr);

    if (!p)
        return 0;
    return ldl_raw(p);
}
511

    
512
/* Store a 32-bit word to guest physical memory; the store is silently
   dropped when the address cannot be mapped.  'addr' must be 4-byte
   aligned. */
void stl_phys_aligned(target_phys_addr_t addr, uint32_t val)
{
    uint8_t *p = get_phys_mem_ptr(addr);

    if (p)
        stl_raw(p, val);
}
520

    
521
/* return value:
   -1 = cannot handle fault 
   0  = nothing more to do 
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
/* Walk the guest page tables for 'addr' and install a TLB entry for
   the access, updating accessed/dirty bits along the way.  Handles
   three paging modes: no paging, legacy 2-level 32-bit paging, and
   PAE (plus the 4-level long-mode walk on x86-64).  On fault, CR2 and
   env->error_code are set for the caller to raise #PF. */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, 
                             int is_write, int is_user, int is_softmmu)
{
    uint32_t pdpe_addr, pde_addr, pte_addr;
    uint32_t pde, pte, ptep, pdpe;
    int error_code, is_dirty, prot, page_size, ret;
    unsigned long paddr, page_offset;
    target_ulong vaddr, virt_addr;
    
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n", 
           addr, is_write, is_user, env->eip);
#endif
    is_write &= 1;
    
    if (env->user_mode_only) {
        /* user mode only emulation */
        error_code = 0;
        goto do_fault;
    }

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: identity mapping, full read/write access */
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE;
        page_size = 4096;
        goto do_mapping;
    }


    if (env->cr[4] & CR4_PAE_MASK) {
        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int32_t sext;

            /* XXX: handle user + rw rights */
            /* XXX: handle NX flag */
            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                /* non-canonical address */
                error_code = 0;
                goto do_fault;
            }
            
            /* level 4: PML4 entry */
            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) & 
                env->a20_mask;
            pml4e = ldl_phys_aligned(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_aligned(pml4e_addr, pml4e);
            }
            
            /* level 3: page-directory-pointer entry */
            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) & 
                env->a20_mask;
            pdpe = ldl_phys_aligned(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_aligned(pdpe_addr, pdpe);
            }
        } else 
#endif
        {
            /* 32-bit PAE: 4-entry PDPT addressed directly from CR3 */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) & 
                env->a20_mask;
            pdpe = ldl_phys_aligned(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
        }

        /* level 2: page directory entry (PAE: 512 8-byte entries) */
        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys_aligned(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            goto handle_big_page;
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_aligned(pde_addr, pde);
            }
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            goto handle_4k_page;
        }
    } else {
        /* legacy 2-level paging */
        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & 
            env->a20_mask;
        pde = ldl_phys_aligned(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
        handle_big_page:
            /* large page: protections come from the PDE alone */
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) && 
                    is_write && !(pde & PG_RW_MASK)) 
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_aligned(pde_addr, pde);
            }
        
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_aligned(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & 
                env->a20_mask;
        handle_4k_page:
            pte = ldl_phys_aligned(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) 
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_aligned(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }

        /* the page can be put in the TLB */
        prot = PAGE_READ;
        if (pte & PG_DIRTY_MASK) {
            /* only set write access if already dirty... otherwise wait
               for dirty access */
            if (is_user) {
                if (ptep & PG_RW_MASK)
                    prot |= PAGE_WRITE;
            } else {
                if (!(env->cr[0] & CR0_WP_MASK) ||
                    (ptep & PG_RW_MASK))
                    prot |= PAGE_WRITE;
            }
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;
    
    ret = tlb_set_page(env, vaddr, paddr, prot, is_user, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    /* record the fault address and build the #PF error code */
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT) | error_code;
    if (is_user)
        env->error_code |= PG_ERROR_U_MASK;
    return 1;
}
736

    
737
#if defined(CONFIG_USER_ONLY) 
/* User-mode emulation: virtual and physical addresses are identical. */
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
#else
/* Translate a virtual address to a physical address for the debugger,
   without touching accessed/dirty bits or the TLB.  Returns -1 if the
   page is not present.  NOTE(review): only the legacy 2-level walk is
   implemented here, unlike cpu_x86_handle_mmu_fault which also handles
   PAE/long mode — verify against callers. */
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint8_t *pde_ptr, *pte_ptr;
    uint32_t pde, pte, paddr, page_offset, page_size;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: identity mapping */
        pte = addr;
        page_size = 4096;
    } else {
        /* page directory entry */
        pde_ptr = phys_ram_base + 
            (((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask);
        pde = ldl_raw(pde_ptr);
        if (!(pde & PG_PRESENT_MASK)) 
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            pte = pde & ~0x003ff000; /* align to 4MB */
            page_size = 4096 * 1024;
        } else {
            /* page table entry */
            pte_ptr = phys_ram_base + 
                (((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask);
            pte = ldl_raw(pte_ptr);
            if (!(pte & PG_PRESENT_MASK))
                return -1;
            page_size = 4096;
        }
    }
    pte = pte & env->a20_mask;
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif
777

    
778
#if defined(USE_CODE_COPY)
779
/* In-memory image of the x87 environment as used by the fsave/frstor
   instructions below (32-bit format: 16-bit fields padded to 32 bits,
   followed by the eight 80-bit data registers). */
struct fpstate {
    uint16_t fpuc;   /* control word */
    uint16_t dummy1;
    uint16_t fpus;   /* status word (stack top in bits 11-13) */
    uint16_t dummy2;
    uint16_t fptag;  /* tag word, 2 bits per register */
    uint16_t dummy3;

    uint32_t fpip;   /* presumably last instruction pointer — per fsave layout */
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10]; /* eight 80-bit (10-byte) data registers */
};
793

    
794
/* Load the emulated FPU state into the host FPU (via frstor) so that
   translated code can execute x87 instructions natively
   (USE_CODE_COPY mode). */
void restore_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;
    
    fp->fpuc = env->fpuc;
    /* merge the emulated stack top (fpstt) into bits 11-13 of the
       status word */
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    /* build the tag word: 2 bits per register, 3 = empty slot */
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    /* copy registers starting at the stack top, wrapping modulo 8 */
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : "=m" (*fp));
    env->native_fp_regs = 1;
}
819
 
820
/* Capture the host FPU state (via fsave) back into the emulated CPU
   state, then reload the host FPU control word with the default
   precision/exception masks (USE_CODE_COPY mode). */
void save_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    /* extract the stack top from bits 11-13 of the status word */
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    /* tag value 3 means the register slot is empty */
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    /* copy registers back, starting at the stack top, wrapping mod 8 */
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    /* XXX: we do not restore the exception state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
    env->native_fp_regs = 0;
}
846
#endif