
target-i386/helper2.c @ 173d6cfe


/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_MMU

#ifdef USE_CODE_COPY
#include <asm/ldt.h>
#include <linux/unistd.h>
#include <linux/version.h>

_syscall3(int, modify_ldt, int, func, void *, ptr, unsigned long, bytecount)

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
#define modify_ldt_ldt_s user_desc
#endif
#endif /* USE_CODE_COPY */

CPUX86State *cpu_x86_init(void)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
    cpu_exec_init(env);

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
#ifdef USE_CODE_COPY
    /* testing code for code copy case */
    {
        struct modify_ldt_ldt_s ldt;

        ldt.entry_number = 1;
        ldt.base_addr = (unsigned long)env;
        ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
        ldt.seg_32bit = 1;
        ldt.contents = MODIFY_LDT_CONTENTS_DATA;
        ldt.read_exec_only = 0;
        ldt.limit_in_pages = 1;
        ldt.seg_not_present = 0;
        ldt.useable = 1;
        modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */

        asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
    }
#endif
    {
        int family, model, stepping;
#ifdef TARGET_X86_64
        env->cpuid_vendor1 = 0x68747541; /* "Auth" */
        env->cpuid_vendor2 = 0x69746e65; /* "enti" */
        env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */
        family = 6;
        model = 2;
        stepping = 3;
#else
        env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
        env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
        env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
#if 0
        /* pentium 75-200 */
        family = 5;
        model = 2;
        stepping = 11;
#else
        /* pentium pro */
        family = 6;
        model = 3;
        stepping = 3;
#endif
#endif
        env->cpuid_level = 2;
        env->cpuid_version = (family << 8) | (model << 4) | stepping;
        env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
                               CPUID_TSC | CPUID_MSR | CPUID_MCE |
                               CPUID_CX8 | CPUID_PGE | CPUID_CMOV |
                               CPUID_PAT);
        env->pat = 0x0007040600070406ULL;
        env->cpuid_ext_features = 0;
        env->cpuid_features |= CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | CPUID_PAE | CPUID_SEP;
        env->cpuid_xlevel = 0;
        {
            const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION;
            int c, len, i;
            len = strlen(model_id);
            for(i = 0; i < 48; i++) {
                if (i >= len)
                    c = '\0';
                else
                    c = model_id[i];
                env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
            }
        }
#ifdef TARGET_X86_64
        /* currently not enabled for std i386 because not fully tested */
        env->cpuid_features |= CPUID_APIC;
        env->cpuid_ext2_features = (env->cpuid_features & 0x0183F3FF);
        env->cpuid_ext2_features |= CPUID_EXT2_LM | CPUID_EXT2_SYSCALL;
        env->cpuid_xlevel = 0x80000008;

        /* these features are needed for Win64 and aren't fully implemented */
        env->cpuid_features |= CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA;
#endif
    }
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    return env;
}

/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = 0xffffffff;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK;
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK;

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);

    env->eip = 0xfff0;
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;
}

void cpu_x86_close(CPUX86State *env)
{
    free(env);
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016llx RBX=%016llx RCX=%016llx RDX=%016llx\n"
                    "RSI=%016llx RDI=%016llx RBP=%016llx RSP=%016llx\n"
                    "R8 =%016llx R9 =%016llx R10=%016llx R11=%016llx\n"
                    "R12=%016llx R13=%016llx R14=%016llx R15=%016llx\n"
                    "RIP=%016llx RFL=%08x [%c%c%c%c%c%c%c]    CPL=%d II=%d A20=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c]    CPL=%d II=%d A20=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1);
    }

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016llx %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016llx %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016llx %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016llx %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016llx %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016llx CR3=%016llx CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016llx CCD=%016llx CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016llx %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016llx",
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = 0xffefffff | (a20_state << 20);
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
515
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
516
}
517

    
518
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
519
   the PDPT */
520
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
521
{
522
    env->cr[3] = new_cr3;
523
    if (env->cr[0] & CR0_PG_MASK) {
524
#if defined(DEBUG_MMU)
525
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
526
#endif
527
        tlb_flush(env, 0);
528
    }
529
}
530

    
531
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
532
{
533
#if defined(DEBUG_MMU)
534
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
535
#endif
536
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
537
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
538
        tlb_flush(env, 1);
539
    }
540
    /* SSE handling */
541
    if (!(env->cpuid_features & CPUID_SSE))
542
        new_cr4 &= ~CR4_OSFXSR_MASK;
543
    if (new_cr4 & CR4_OSFXSR_MASK)
544
        env->hflags |= HF_OSFXSR_MASK;
545
    else
546
        env->hflags &= ~HF_OSFXSR_MASK;
547

    
548
    env->cr[4] = new_cr4;
549
}
550

    
551
/* XXX: also flush 4MB pages */
552
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
553
{
554
    tlb_flush_page(env, addr);
555
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int is_user, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    return 1;
}

target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

#else
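
/* The softmmu fault handler below walks the guest page tables in three
   layouts: no paging (CR0.PG clear), legacy two-level 32-bit tables, and
   three-level PAE tables, extended to the four-level long mode walk when
   TARGET_X86_64 is defined. As the XXX comments note, only 32-bit
   physical addresses are used. */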
/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int is_user, int is_softmmu)
{
    uint32_t pdpe_addr, pde_addr, pte_addr;
    uint32_t pde, pte, ptep, pdpe;
    int error_code, is_dirty, prot, page_size, ret;
    unsigned long paddr, page_offset;
    target_ulong vaddr, virt_addr;

#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write, is_user, env->eip);
#endif
    is_write &= 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int32_t sext;

            /* XXX: handle user + rw rights */
            /* XXX: handle NX flag */
            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                error_code = 0;
                goto do_fault;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            goto handle_big_page;
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            goto handle_4k_page;
        }
    } else {
        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
        handle_big_page:
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
        handle_4k_page:
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }

        /* the page can be put in the TLB */
        prot = PAGE_READ;
        if (pte & PG_DIRTY_MASK) {
            /* only set write access if already dirty... otherwise wait
               for dirty access */
            if (is_user) {
                if (ptep & PG_RW_MASK)
                    prot |= PAGE_WRITE;
            } else {
                if (!(env->cr[0] & CR0_WP_MASK) ||
                    (ptep & PG_RW_MASK))
                    prot |= PAGE_WRITE;
            }
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page(env, vaddr, paddr, prot, is_user, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT) | error_code;
    if (is_user)
        env->error_code |= PG_ERROR_U_MASK;
    return 1;
}

target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t pde_addr, pte_addr;
    uint32_t pde, pte, paddr, page_offset, page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        uint32_t pdpe_addr, pde_addr, pte_addr;
        uint32_t pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldl_phys(pte_addr);
        }
    } else {
        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page directory entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif /* !CONFIG_USER_ONLY */
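
/* With USE_CODE_COPY, the helpers below swap the guest FPU state onto the
   host FPU: restore_native_fp_state() rebuilds an fsave-format image from
   the CPUState and loads it with frstor, and save_native_fp_state() does
   the reverse with fsave and then resets the control word to the default
   rounding state. */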
#if defined(USE_CODE_COPY)
struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;

    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};

void restore_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : "=m" (*fp));
    env->native_fp_regs = 1;
}

void save_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    /* XXX: we do not restore the exception state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
    env->native_fp_regs = 0;
}
#endif