/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_MMU

#ifdef USE_CODE_COPY
#include <asm/ldt.h>
#include <linux/unistd.h>
#include <linux/version.h>

int modify_ldt(int func, void *ptr, unsigned long bytecount)
{
        return syscall(__NR_modify_ldt, func, ptr, bytecount);
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
#define modify_ldt_ldt_s user_desc
#endif
#endif /* USE_CODE_COPY */

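/* Allocate and initialize a new x86 CPU state.  The CPUID vendor,
   family/model/stepping and feature bits advertised to the guest are
   set up here, then the CPU is put into its reset state. */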
CPUX86State *cpu_x86_init(void)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
    cpu_exec_init(env);

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
#ifdef USE_CODE_COPY
    /* testing code for code copy case */
    {
        struct modify_ldt_ldt_s ldt;

        ldt.entry_number = 1;
        ldt.base_addr = (unsigned long)env;
        ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
        ldt.seg_32bit = 1;
        ldt.contents = MODIFY_LDT_CONTENTS_DATA;
        ldt.read_exec_only = 0;
        ldt.limit_in_pages = 1;
        ldt.seg_not_present = 0;
        ldt.useable = 1;
        modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */

        asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
    }
#endif
    {
        int family, model, stepping;
#ifdef TARGET_X86_64
        env->cpuid_vendor1 = 0x68747541; /* "Auth" */
        env->cpuid_vendor2 = 0x69746e65; /* "enti" */
        env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */
        family = 6;
        model = 2;
        stepping = 3;
#else
        env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
        env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
        env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
#if 0
        /* pentium 75-200 */
        family = 5;
        model = 2;
        stepping = 11;
#else
        /* pentium pro */
        family = 6;
        model = 3;
        stepping = 3;
#endif
#endif
        env->cpuid_level = 2;
        env->cpuid_version = (family << 8) | (model << 4) | stepping;
        env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
                               CPUID_TSC | CPUID_MSR | CPUID_MCE |
                               CPUID_CX8 | CPUID_PGE | CPUID_CMOV |
                               CPUID_PAT);
        env->pat = 0x0007040600070406ULL;
        env->cpuid_ext_features = CPUID_EXT_SSE3;
        env->cpuid_features |= CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | CPUID_PAE | CPUID_SEP;
        env->cpuid_features |= CPUID_APIC;
        env->cpuid_xlevel = 0;
        {
            const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION;
            int c, len, i;
            len = strlen(model_id);
            for(i = 0; i < 48; i++) {
                if (i >= len)
                    c = '\0';
                else
                    c = model_id[i];
                env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
            }
        }
#ifdef TARGET_X86_64
        /* currently not enabled for std i386 because not fully tested */
        env->cpuid_ext2_features = (env->cpuid_features & 0x0183F3FF);
        env->cpuid_ext2_features |= CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX;
        env->cpuid_xlevel = 0x80000008;

        /* these features are needed for Win64 and aren't fully implemented */
        env->cpuid_features |= CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA;
#endif
    }
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    return env;
}

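/* Reset the CPU to its architectural power-on state: real mode with
   CS at the reset vector (CS.base 0xffff0000, EIP 0xfff0), the other
   segments as 64KB data segments based at 0, all x87 registers tagged
   empty, and FPU/SSE control words at their default values. */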
/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = 0xffffffff;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK;
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK;

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);

    env->eip = 0xfff0;
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;
}

void cpu_x86_close(CPUX86State *env)
{
    free(env);
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

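/* Dump the CPU state to 'f' in human readable form.  The general purpose
   registers, eflags and segment/descriptor-table state are always printed;
   X86_DUMP_CCOP adds the lazy condition code state and X86_DUMP_FPU the
   x87 and SSE registers.  64 bit formats are used when the CPU is in
   long mode. */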
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016llx RBX=%016llx RCX=%016llx RDX=%016llx\n"
                    "RSI=%016llx RDI=%016llx RBP=%016llx RSP=%016llx\n"
                    "R8 =%016llx R9 =%016llx R10=%016llx R11=%016llx\n"
                    "R12=%016llx R13=%016llx R14=%016llx R15=%016llx\n"
                    "RIP=%016llx RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    }

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016llx %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016llx %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016llx %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016llx %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016llx %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016llx CR3=%016llx CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016llx CCD=%016llx CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016llx %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016llx",
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

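/* Set the state of the A20 address line gate.  Toggling A20 changes the
   physical address seen by every memory access, so the currently
   executing translation blocks are unlinked and the whole TLB is
   flushed before the new address mask is installed. */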
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = 0xffefffff | (a20_state << 20);
    }
}

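/* Install a new CR0 value.  A change of PG, WP or PE forces a full TLB
   flush.  Setting PG while EFER.LME is set enters long mode, and
   clearing PG leaves it.  The PE, MP, EM and TS bits are mirrored into
   env->hflags so the translator can test them cheaply. */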
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

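/* Install a new CR4 value.  Changing PGE, PAE or PSE invalidates all
   cached translations.  OSFXSR is only honoured when SSE is advertised
   in CPUID, and its state is mirrored into env->hflags. */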
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}

/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env, addr);
}

#if defined(CONFIG_USER_ONLY)

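/* In user mode emulation there is no guest page table to walk: any
   fault is simply reported back to the guest as a user-level page
   fault on the faulting address. */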
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int is_user, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

#else

#define PHYS_ADDR_MASK 0xfffff000

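/* Walk the guest page tables for 'addr' and enter the translation into
   the QEMU TLB.  The legacy 2-level, PAE and long mode (4-level)
   formats are handled, including 4MB/2MB large pages; accessed and
   dirty bits are updated as a real MMU would. */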
/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int is_user, int is_softmmu)
{
    uint64_t ptep, pte;
    uint32_t pdpe_addr, pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write;
    unsigned long paddr, page_offset;
    target_ulong vaddr, virt_addr;

#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr;
            uint64_t pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    env->cr[2] = addr;
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

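/* Translate a virtual address to a physical address for debug accesses
   (e.g. the gdb stub or the monitor).  The page tables are walked
   read-only: no fault is raised and no accessed/dirty bits are
   modified.  Returns -1 if the address is not mapped. */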
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t pde_addr, pte_addr;
    uint32_t pde, pte, paddr, page_offset, page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        uint32_t pdpe_addr, pde_addr, pte_addr;
        uint32_t pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldl_phys(pte_addr);
        }
    } else {
        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif /* !CONFIG_USER_ONLY */

#if defined(USE_CODE_COPY)
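/* Memory image used by the fsave/frstor instructions below: the 32-bit
   FPU environment (control, status and tag words plus instruction and
   operand pointers) followed by the eight 80-bit data registers. */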
struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;

    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};

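/* Load the emulated x87 state into the host FPU with frstor so that
   copied guest code can execute FPU instructions natively.  The tag
   word is rebuilt from env->fptags (only empty vs. valid is tracked)
   and the data registers are copied out starting at the current
   top-of-stack index. */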
void restore_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : "=m" (*fp));
    env->native_fp_regs = 1;
}

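/* Save the host FPU state back into the emulated CPU with fsave, then
   reload a control word with all exceptions masked so the host FPU is
   left in a known state for QEMU's own use. */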
void save_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    /* XXX: we do not restore the exception state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
    env->native_fp_regs = 0;
}
#endif