Statistics
| Branch: | Revision:

root / target-i386 / helper2.c @ 8f091a59

History | View | Annotate | Download (28.8 kB)

1
/*
2
 *  i386 helpers (without register variable usage)
3
 * 
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include <stdarg.h>
21
#include <stdlib.h>
22
#include <stdio.h>
23
#include <string.h>
24
#include <inttypes.h>
25
#include <signal.h>
26
#include <assert.h>
27

    
28
#include "cpu.h"
29
#include "exec-all.h"
30

    
31
//#define DEBUG_MMU
32

    
33
#ifdef USE_CODE_COPY
34
#include <asm/ldt.h>
35
#include <linux/unistd.h>
36
#include <linux/version.h>
37

    
38
_syscall3(int, modify_ldt, int, func, void *, ptr, unsigned long, bytecount)
39

    
40
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
41
#define modify_ldt_ldt_s user_desc
42
#endif
43
#endif /* USE_CODE_COPY */
44

    
45
CPUX86State *cpu_x86_init(void)
46
{
47
    CPUX86State *env;
48
    static int inited;
49

    
50
    cpu_exec_init();
51

    
52
    env = malloc(sizeof(CPUX86State));
53
    if (!env)
54
        return NULL;
55
    memset(env, 0, sizeof(CPUX86State));
56
    /* init various static tables */
57
    if (!inited) {
58
        inited = 1;
59
        optimize_flags_init();
60
    }
61
#ifdef USE_CODE_COPY
62
    /* testing code for code copy case */
63
    {
64
        struct modify_ldt_ldt_s ldt;
65

    
66
        ldt.entry_number = 1;
67
        ldt.base_addr = (unsigned long)env;
68
        ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
69
        ldt.seg_32bit = 1;
70
        ldt.contents = MODIFY_LDT_CONTENTS_DATA;
71
        ldt.read_exec_only = 0;
72
        ldt.limit_in_pages = 1;
73
        ldt.seg_not_present = 0;
74
        ldt.useable = 1;
75
        modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */
76
        
77
        asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
78
    }
79
#endif
80
    {
81
        int family, model, stepping;
82
#ifdef TARGET_X86_64
83
        env->cpuid_vendor1 = 0x68747541; /* "Auth" */
84
        env->cpuid_vendor2 = 0x69746e65; /* "enti" */
85
        env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */
86
        family = 6;
87
        model = 2;
88
        stepping = 3;
89
#else
90
        env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
91
        env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
92
        env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
93
#if 0
94
        /* pentium 75-200 */
95
        family = 5;
96
        model = 2;
97
        stepping = 11;
98
#else
99
        /* pentium pro */
100
        family = 6;
101
        model = 3;
102
        stepping = 3;
103
#endif
104
#endif
105
        env->cpuid_level = 2;
106
        env->cpuid_version = (family << 8) | (model << 4) | stepping;
107
        env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
108
                               CPUID_TSC | CPUID_MSR | CPUID_MCE |
109
                               CPUID_CX8 | CPUID_PGE | CPUID_CMOV |
110
                               CPUID_PAT);
111
        env->pat = 0x0007040600070406ULL;
112
        env->cpuid_ext_features = 0;
113
        env->cpuid_features |= CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | CPUID_PAE | CPUID_SEP;
114
        env->cpuid_xlevel = 0;
115
        {
116
            const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION;
117
            int c, len, i;
118
            len = strlen(model_id);
119
            for(i = 0; i < 48; i++) {
120
                if (i >= len)
121
                    c = '\0';
122
                else
123
                    c = model_id[i];
124
                env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
125
            }
126
        }
127
#ifdef TARGET_X86_64
128
        /* currently not enabled for std i386 because not fully tested */
129
        env->cpuid_features |= CPUID_APIC;
130
        env->cpuid_ext2_features = (env->cpuid_features & 0x0183F3FF);
131
        env->cpuid_ext2_features |= CPUID_EXT2_LM | CPUID_EXT2_SYSCALL;
132
        env->cpuid_xlevel = 0x80000008;
133

    
134
        /* these features are needed for Win64 and aren't fully implemented */
135
        env->cpuid_features |= CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA;
136
#endif
137
    }
138
    cpu_single_env = env;
139
    cpu_reset(env);
140
#ifdef USE_KQEMU
141
    kqemu_init(env);
142
#endif
143
    return env;
144
}
145

    
146
/* NOTE: must be called outside the CPU execute loop */
147
void cpu_reset(CPUX86State *env)
148
{
149
    int i;
150

    
151
    memset(env, 0, offsetof(CPUX86State, breakpoints));
152

    
153
    tlb_flush(env, 1);
154

    
155
    /* init to reset state */
156

    
157
#ifdef CONFIG_SOFTMMU
158
    env->hflags |= HF_SOFTMMU_MASK;
159
#endif
160

    
161
    cpu_x86_update_cr0(env, 0x60000010);
162
    env->a20_mask = 0xffffffff;
163
    
164
    env->idt.limit = 0xffff;
165
    env->gdt.limit = 0xffff;
166
    env->ldt.limit = 0xffff;
167
    env->ldt.flags = DESC_P_MASK;
168
    env->tr.limit = 0xffff;
169
    env->tr.flags = DESC_P_MASK;
170
    
171
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0); 
172
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
173
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
174
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
175
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
176
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);
177
    
178
    env->eip = 0xfff0;
179
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */
180
    
181
    env->eflags = 0x2;
182
    
183
    /* FPU init */
184
    for(i = 0;i < 8; i++)
185
        env->fptags[i] = 1;
186
    env->fpuc = 0x37f;
187

    
188
    env->mxcsr = 0x1f80;
189
}
190

    
191
void cpu_x86_close(CPUX86State *env)
192
{
193
    free(env);
194
}
195

    
196
/***********************************************************/
197
/* x86 debug */
198

    
199
static const char *cc_op_str[] = {
200
    "DYNAMIC",
201
    "EFLAGS",
202

    
203
    "MULB",
204
    "MULW",
205
    "MULL",
206
    "MULQ",
207

    
208
    "ADDB",
209
    "ADDW",
210
    "ADDL",
211
    "ADDQ",
212

    
213
    "ADCB",
214
    "ADCW",
215
    "ADCL",
216
    "ADCQ",
217

    
218
    "SUBB",
219
    "SUBW",
220
    "SUBL",
221
    "SUBQ",
222

    
223
    "SBBB",
224
    "SBBW",
225
    "SBBL",
226
    "SBBQ",
227

    
228
    "LOGICB",
229
    "LOGICW",
230
    "LOGICL",
231
    "LOGICQ",
232

    
233
    "INCB",
234
    "INCW",
235
    "INCL",
236
    "INCQ",
237

    
238
    "DECB",
239
    "DECW",
240
    "DECL",
241
    "DECQ",
242

    
243
    "SHLB",
244
    "SHLW",
245
    "SHLL",
246
    "SHLQ",
247

    
248
    "SARB",
249
    "SARW",
250
    "SARL",
251
    "SARQ",
252
};
253

    
254
void cpu_dump_state(CPUState *env, FILE *f, 
255
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
256
                    int flags)
257
{
258
    int eflags, i, nb;
259
    char cc_op_name[32];
260
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
261

    
262
    eflags = env->eflags;
263
#ifdef TARGET_X86_64
264
    if (env->hflags & HF_CS64_MASK) {
265
        cpu_fprintf(f, 
266
                    "RAX=%016llx RBX=%016llx RCX=%016llx RDX=%016llx\n"
267
                    "RSI=%016llx RDI=%016llx RBP=%016llx RSP=%016llx\n"
268
                    "R8 =%016llx R9 =%016llx R10=%016llx R11=%016llx\n"
269
                    "R12=%016llx R13=%016llx R14=%016llx R15=%016llx\n"
270
                    "RIP=%016llx RFL=%08x [%c%c%c%c%c%c%c]    CPL=%d II=%d A20=%d\n",
271
                    env->regs[R_EAX], 
272
                    env->regs[R_EBX], 
273
                    env->regs[R_ECX], 
274
                    env->regs[R_EDX], 
275
                    env->regs[R_ESI], 
276
                    env->regs[R_EDI], 
277
                    env->regs[R_EBP], 
278
                    env->regs[R_ESP], 
279
                    env->regs[8], 
280
                    env->regs[9], 
281
                    env->regs[10], 
282
                    env->regs[11], 
283
                    env->regs[12], 
284
                    env->regs[13], 
285
                    env->regs[14], 
286
                    env->regs[15], 
287
                    env->eip, eflags,
288
                    eflags & DF_MASK ? 'D' : '-',
289
                    eflags & CC_O ? 'O' : '-',
290
                    eflags & CC_S ? 'S' : '-',
291
                    eflags & CC_Z ? 'Z' : '-',
292
                    eflags & CC_A ? 'A' : '-',
293
                    eflags & CC_P ? 'P' : '-',
294
                    eflags & CC_C ? 'C' : '-',
295
                    env->hflags & HF_CPL_MASK, 
296
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
297
                    (env->a20_mask >> 20) & 1);
298
    } else 
299
#endif
300
    {
301
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
302
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
303
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c]    CPL=%d II=%d A20=%d\n",
304
                    (uint32_t)env->regs[R_EAX], 
305
                    (uint32_t)env->regs[R_EBX], 
306
                    (uint32_t)env->regs[R_ECX], 
307
                    (uint32_t)env->regs[R_EDX], 
308
                    (uint32_t)env->regs[R_ESI], 
309
                    (uint32_t)env->regs[R_EDI], 
310
                    (uint32_t)env->regs[R_EBP], 
311
                    (uint32_t)env->regs[R_ESP], 
312
                    (uint32_t)env->eip, eflags,
313
                    eflags & DF_MASK ? 'D' : '-',
314
                    eflags & CC_O ? 'O' : '-',
315
                    eflags & CC_S ? 'S' : '-',
316
                    eflags & CC_Z ? 'Z' : '-',
317
                    eflags & CC_A ? 'A' : '-',
318
                    eflags & CC_P ? 'P' : '-',
319
                    eflags & CC_C ? 'C' : '-',
320
                    env->hflags & HF_CPL_MASK, 
321
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
322
                    (env->a20_mask >> 20) & 1);
323
    }
324

    
325
#ifdef TARGET_X86_64
326
    if (env->hflags & HF_LMA_MASK) {
327
        for(i = 0; i < 6; i++) {
328
            SegmentCache *sc = &env->segs[i];
329
            cpu_fprintf(f, "%s =%04x %016llx %08x %08x\n",
330
                        seg_name[i],
331
                        sc->selector,
332
                        sc->base,
333
                        sc->limit,
334
                        sc->flags);
335
        }
336
        cpu_fprintf(f, "LDT=%04x %016llx %08x %08x\n",
337
                    env->ldt.selector,
338
                    env->ldt.base,
339
                    env->ldt.limit,
340
                    env->ldt.flags);
341
        cpu_fprintf(f, "TR =%04x %016llx %08x %08x\n",
342
                    env->tr.selector,
343
                    env->tr.base,
344
                    env->tr.limit,
345
                    env->tr.flags);
346
        cpu_fprintf(f, "GDT=     %016llx %08x\n",
347
                    env->gdt.base, env->gdt.limit);
348
        cpu_fprintf(f, "IDT=     %016llx %08x\n",
349
                    env->idt.base, env->idt.limit);
350
        cpu_fprintf(f, "CR0=%08x CR2=%016llx CR3=%016llx CR4=%08x\n",
351
                    (uint32_t)env->cr[0], 
352
                    env->cr[2], 
353
                    env->cr[3], 
354
                    (uint32_t)env->cr[4]);
355
    } else
356
#endif
357
    {
358
        for(i = 0; i < 6; i++) {
359
            SegmentCache *sc = &env->segs[i];
360
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
361
                        seg_name[i],
362
                        sc->selector,
363
                        (uint32_t)sc->base,
364
                        sc->limit,
365
                        sc->flags);
366
        }
367
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
368
                    env->ldt.selector,
369
                    (uint32_t)env->ldt.base,
370
                    env->ldt.limit,
371
                    env->ldt.flags);
372
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
373
                    env->tr.selector,
374
                    (uint32_t)env->tr.base,
375
                    env->tr.limit,
376
                    env->tr.flags);
377
        cpu_fprintf(f, "GDT=     %08x %08x\n",
378
                    (uint32_t)env->gdt.base, env->gdt.limit);
379
        cpu_fprintf(f, "IDT=     %08x %08x\n",
380
                    (uint32_t)env->idt.base, env->idt.limit);
381
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
382
                    (uint32_t)env->cr[0], 
383
                    (uint32_t)env->cr[2], 
384
                    (uint32_t)env->cr[3], 
385
                    (uint32_t)env->cr[4]);
386
    }
387
    if (flags & X86_DUMP_CCOP) {
388
        if ((unsigned)env->cc_op < CC_OP_NB)
389
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
390
        else
391
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
392
#ifdef TARGET_X86_64
393
        if (env->hflags & HF_CS64_MASK) {
394
            cpu_fprintf(f, "CCS=%016llx CCD=%016llx CCO=%-8s\n",
395
                        env->cc_src, env->cc_dst, 
396
                        cc_op_name);
397
        } else 
398
#endif
399
        {
400
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
401
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst, 
402
                        cc_op_name);
403
        }
404
    }
405
    if (flags & X86_DUMP_FPU) {
406
        int fptag;
407
        fptag = 0;
408
        for(i = 0; i < 8; i++) {
409
            fptag |= ((!env->fptags[i]) << i);
410
        }
411
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
412
                    env->fpuc,
413
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
414
                    env->fpstt,
415
                    fptag,
416
                    env->mxcsr);
417
        for(i=0;i<8;i++) {
418
#if defined(USE_X86LDOUBLE)
419
            union {
420
                long double d;
421
                struct {
422
                    uint64_t lower;
423
                    uint16_t upper;
424
                } l;
425
            } tmp;
426
            tmp.d = env->fpregs[i].d;
427
            cpu_fprintf(f, "FPR%d=%016llx %04x",
428
                        i, tmp.l.lower, tmp.l.upper);
429
#else
430
            cpu_fprintf(f, "FPR%d=%016llx",
431
                        i, env->fpregs[i].mmx.q);
432
#endif
433
            if ((i & 1) == 1)
434
                cpu_fprintf(f, "\n");
435
            else
436
                cpu_fprintf(f, " ");
437
        }
438
        if (env->hflags & HF_CS64_MASK) 
439
            nb = 16;
440
        else
441
            nb = 8;
442
        for(i=0;i<nb;i++) {
443
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
444
                        i, 
445
                        env->xmm_regs[i].XMM_L(3),
446
                        env->xmm_regs[i].XMM_L(2),
447
                        env->xmm_regs[i].XMM_L(1),
448
                        env->xmm_regs[i].XMM_L(0));
449
            if ((i & 1) == 1)
450
                cpu_fprintf(f, "\n");
451
            else
452
                cpu_fprintf(f, " ");
453
        }
454
    }
455
}
456

    
457
/***********************************************************/
458
/* x86 mmu */
459
/* XXX: add PGE support */
460

    
461
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
462
{
463
    a20_state = (a20_state != 0);
464
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
465
#if defined(DEBUG_MMU)
466
        printf("A20 update: a20=%d\n", a20_state);
467
#endif
468
        /* if the cpu is currently executing code, we must unlink it and
469
           all the potentially executing TB */
470
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
471

    
472
        /* when a20 is changed, all the MMU mappings are invalid, so
473
           we must flush everything */
474
        tlb_flush(env, 1);
475
        env->a20_mask = 0xffefffff | (a20_state << 20);
476
    }
477
}
478

    
479
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
480
{
481
    int pe_state;
482

    
483
#if defined(DEBUG_MMU)
484
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
485
#endif
486
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
487
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
488
        tlb_flush(env, 1);
489
    }
490

    
491
#ifdef TARGET_X86_64
492
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
493
        (env->efer & MSR_EFER_LME)) {
494
        /* enter in long mode */
495
        /* XXX: generate an exception */
496
        if (!(env->cr[4] & CR4_PAE_MASK))
497
            return;
498
        env->efer |= MSR_EFER_LMA;
499
        env->hflags |= HF_LMA_MASK;
500
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
501
               (env->efer & MSR_EFER_LMA)) {
502
        /* exit long mode */
503
        env->efer &= ~MSR_EFER_LMA;
504
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
505
        env->eip &= 0xffffffff;
506
    }
507
#endif
508
    env->cr[0] = new_cr0 | CR0_ET_MASK;
509
    
510
    /* update PE flag in hidden flags */
511
    pe_state = (env->cr[0] & CR0_PE_MASK);
512
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
513
    /* ensure that ADDSEG is always set in real mode */
514
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
515
    /* update FPU flags */
516
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
517
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
518
}
519

    
520
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
521
   the PDPT */
522
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
523
{
524
    env->cr[3] = new_cr3;
525
    if (env->cr[0] & CR0_PG_MASK) {
526
#if defined(DEBUG_MMU)
527
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
528
#endif
529
        tlb_flush(env, 0);
530
    }
531
}
532

    
533
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
534
{
535
#if defined(DEBUG_MMU)
536
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
537
#endif
538
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
539
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
540
        tlb_flush(env, 1);
541
    }
542
    /* SSE handling */
543
    if (!(env->cpuid_features & CPUID_SSE))
544
        new_cr4 &= ~CR4_OSFXSR_MASK;
545
    if (new_cr4 & CR4_OSFXSR_MASK)
546
        env->hflags |= HF_OSFXSR_MASK;
547
    else
548
        env->hflags &= ~HF_OSFXSR_MASK;
549

    
550
    env->cr[4] = new_cr4;
551
}
552

    
553
/* XXX: also flush 4MB pages */
554
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
555
{
556
    tlb_flush_page(env, addr);
557
}
558

    
559
#if defined(CONFIG_USER_ONLY) 
560

    
561
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, 
562
                             int is_write, int is_user, int is_softmmu)
563
{
564
    /* user mode only emulation */
565
    is_write &= 1;
566
    env->cr[2] = addr;
567
    env->error_code = (is_write << PG_ERROR_W_BIT);
568
    env->error_code |= PG_ERROR_U_MASK;
569
    return 1;
570
}
571

    
572
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
573
{
574
    return addr;
575
}
576

    
577
#else
578

    
579
/* return value:
580
   -1 = cannot handle fault 
581
   0  = nothing more to do 
582
   1  = generate PF fault
583
   2  = soft MMU activation required for this block
584
*/
585
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, 
586
                             int is_write, int is_user, int is_softmmu)
587
{
588
    uint32_t pdpe_addr, pde_addr, pte_addr;
589
    uint32_t pde, pte, ptep, pdpe;
590
    int error_code, is_dirty, prot, page_size, ret;
591
    unsigned long paddr, page_offset;
592
    target_ulong vaddr, virt_addr;
593
    
594
#if defined(DEBUG_MMU)
595
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n", 
596
           addr, is_write, is_user, env->eip);
597
#endif
598
    is_write &= 1;
599
    
600
    if (!(env->cr[0] & CR0_PG_MASK)) {
601
        pte = addr;
602
        virt_addr = addr & TARGET_PAGE_MASK;
603
        prot = PAGE_READ | PAGE_WRITE;
604
        page_size = 4096;
605
        goto do_mapping;
606
    }
607

    
608
    if (env->cr[4] & CR4_PAE_MASK) {
609
        /* XXX: we only use 32 bit physical addresses */
610
#ifdef TARGET_X86_64
611
        if (env->hflags & HF_LMA_MASK) {
612
            uint32_t pml4e_addr, pml4e;
613
            int32_t sext;
614

    
615
            /* XXX: handle user + rw rights */
616
            /* XXX: handle NX flag */
617
            /* test virtual address sign extension */
618
            sext = (int64_t)addr >> 47;
619
            if (sext != 0 && sext != -1) {
620
                error_code = 0;
621
                goto do_fault;
622
            }
623
            
624
            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) & 
625
                env->a20_mask;
626
            pml4e = ldl_phys(pml4e_addr);
627
            if (!(pml4e & PG_PRESENT_MASK)) {
628
                error_code = 0;
629
                goto do_fault;
630
            }
631
            if (!(pml4e & PG_ACCESSED_MASK)) {
632
                pml4e |= PG_ACCESSED_MASK;
633
                stl_phys_notdirty(pml4e_addr, pml4e);
634
            }
635
            
636
            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) & 
637
                env->a20_mask;
638
            pdpe = ldl_phys(pdpe_addr);
639
            if (!(pdpe & PG_PRESENT_MASK)) {
640
                error_code = 0;
641
                goto do_fault;
642
            }
643
            if (!(pdpe & PG_ACCESSED_MASK)) {
644
                pdpe |= PG_ACCESSED_MASK;
645
                stl_phys_notdirty(pdpe_addr, pdpe);
646
            }
647
        } else 
648
#endif
649
        {
650
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) & 
651
                env->a20_mask;
652
            pdpe = ldl_phys(pdpe_addr);
653
            if (!(pdpe & PG_PRESENT_MASK)) {
654
                error_code = 0;
655
                goto do_fault;
656
            }
657
        }
658

    
659
        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
660
            env->a20_mask;
661
        pde = ldl_phys(pde_addr);
662
        if (!(pde & PG_PRESENT_MASK)) {
663
            error_code = 0;
664
            goto do_fault;
665
        }
666
        if (pde & PG_PSE_MASK) {
667
            /* 2 MB page */
668
            page_size = 2048 * 1024;
669
            goto handle_big_page;
670
        } else {
671
            /* 4 KB page */
672
            if (!(pde & PG_ACCESSED_MASK)) {
673
                pde |= PG_ACCESSED_MASK;
674
                stl_phys_notdirty(pde_addr, pde);
675
            }
676
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
677
                env->a20_mask;
678
            goto handle_4k_page;
679
        }
680
    } else {
681
        /* page directory entry */
682
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & 
683
            env->a20_mask;
684
        pde = ldl_phys(pde_addr);
685
        if (!(pde & PG_PRESENT_MASK)) {
686
            error_code = 0;
687
            goto do_fault;
688
        }
689
        /* if PSE bit is set, then we use a 4MB page */
690
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
691
            page_size = 4096 * 1024;
692
        handle_big_page:
693
            if (is_user) {
694
                if (!(pde & PG_USER_MASK))
695
                    goto do_fault_protect;
696
                if (is_write && !(pde & PG_RW_MASK))
697
                    goto do_fault_protect;
698
            } else {
699
                if ((env->cr[0] & CR0_WP_MASK) && 
700
                    is_write && !(pde & PG_RW_MASK)) 
701
                    goto do_fault_protect;
702
            }
703
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
704
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
705
                pde |= PG_ACCESSED_MASK;
706
                if (is_dirty)
707
                    pde |= PG_DIRTY_MASK;
708
                stl_phys_notdirty(pde_addr, pde);
709
            }
710
        
711
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
712
            ptep = pte;
713
            virt_addr = addr & ~(page_size - 1);
714
        } else {
715
            if (!(pde & PG_ACCESSED_MASK)) {
716
                pde |= PG_ACCESSED_MASK;
717
                stl_phys_notdirty(pde_addr, pde);
718
            }
719

    
720
            /* page directory entry */
721
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & 
722
                env->a20_mask;
723
        handle_4k_page:
724
            pte = ldl_phys(pte_addr);
725
            if (!(pte & PG_PRESENT_MASK)) {
726
                error_code = 0;
727
                goto do_fault;
728
            }
729
            /* combine pde and pte user and rw protections */
730
            ptep = pte & pde;
731
            if (is_user) {
732
                if (!(ptep & PG_USER_MASK))
733
                    goto do_fault_protect;
734
                if (is_write && !(ptep & PG_RW_MASK))
735
                    goto do_fault_protect;
736
            } else {
737
                if ((env->cr[0] & CR0_WP_MASK) &&
738
                    is_write && !(ptep & PG_RW_MASK)) 
739
                    goto do_fault_protect;
740
            }
741
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
742
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
743
                pte |= PG_ACCESSED_MASK;
744
                if (is_dirty)
745
                    pte |= PG_DIRTY_MASK;
746
                stl_phys_notdirty(pte_addr, pte);
747
            }
748
            page_size = 4096;
749
            virt_addr = addr & ~0xfff;
750
        }
751

    
752
        /* the page can be put in the TLB */
753
        prot = PAGE_READ;
754
        if (pte & PG_DIRTY_MASK) {
755
            /* only set write access if already dirty... otherwise wait
756
               for dirty access */
757
            if (is_user) {
758
                if (ptep & PG_RW_MASK)
759
                    prot |= PAGE_WRITE;
760
            } else {
761
                if (!(env->cr[0] & CR0_WP_MASK) ||
762
                    (ptep & PG_RW_MASK))
763
                    prot |= PAGE_WRITE;
764
            }
765
        }
766
    }
767
 do_mapping:
768
    pte = pte & env->a20_mask;
769

    
770
    /* Even if 4MB pages, we map only one 4KB page in the cache to
771
       avoid filling it too fast */
772
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
773
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
774
    vaddr = virt_addr + page_offset;
775
    
776
    ret = tlb_set_page(env, vaddr, paddr, prot, is_user, is_softmmu);
777
    return ret;
778
 do_fault_protect:
779
    error_code = PG_ERROR_P_MASK;
780
 do_fault:
781
    env->cr[2] = addr;
782
    env->error_code = (is_write << PG_ERROR_W_BIT) | error_code;
783
    if (is_user)
784
        env->error_code |= PG_ERROR_U_MASK;
785
    return 1;
786
}
787

    
788
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
789
{
790
    uint32_t pde_addr, pte_addr;
791
    uint32_t pde, pte, paddr, page_offset, page_size;
792

    
793
    if (env->cr[4] & CR4_PAE_MASK) {
794
        uint32_t pdpe_addr, pde_addr, pte_addr;
795
        uint32_t pdpe;
796

    
797
        /* XXX: we only use 32 bit physical addresses */
798
#ifdef TARGET_X86_64
799
        if (env->hflags & HF_LMA_MASK) {
800
            uint32_t pml4e_addr, pml4e;
801
            int32_t sext;
802

    
803
            /* test virtual address sign extension */
804
            sext = (int64_t)addr >> 47;
805
            if (sext != 0 && sext != -1)
806
                return -1;
807
            
808
            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) & 
809
                env->a20_mask;
810
            pml4e = ldl_phys(pml4e_addr);
811
            if (!(pml4e & PG_PRESENT_MASK))
812
                return -1;
813
            
814
            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) & 
815
                env->a20_mask;
816
            pdpe = ldl_phys(pdpe_addr);
817
            if (!(pdpe & PG_PRESENT_MASK))
818
                return -1;
819
        } else 
820
#endif
821
        {
822
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) & 
823
                env->a20_mask;
824
            pdpe = ldl_phys(pdpe_addr);
825
            if (!(pdpe & PG_PRESENT_MASK))
826
                return -1;
827
        }
828

    
829
        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
830
            env->a20_mask;
831
        pde = ldl_phys(pde_addr);
832
        if (!(pde & PG_PRESENT_MASK)) {
833
            return -1;
834
        }
835
        if (pde & PG_PSE_MASK) {
836
            /* 2 MB page */
837
            page_size = 2048 * 1024;
838
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
839
        } else {
840
            /* 4 KB page */
841
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
842
                env->a20_mask;
843
            page_size = 4096;
844
            pte = ldl_phys(pte_addr);
845
        }
846
    } else {
847
        if (!(env->cr[0] & CR0_PG_MASK)) {
848
            pte = addr;
849
            page_size = 4096;
850
        } else {
851
            /* page directory entry */
852
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask;
853
            pde = ldl_phys(pde_addr);
854
            if (!(pde & PG_PRESENT_MASK)) 
855
                return -1;
856
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
857
                pte = pde & ~0x003ff000; /* align to 4MB */
858
                page_size = 4096 * 1024;
859
            } else {
860
                /* page directory entry */
861
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
862
                pte = ldl_phys(pte_addr);
863
                if (!(pte & PG_PRESENT_MASK))
864
                    return -1;
865
                page_size = 4096;
866
            }
867
        }
868
        pte = pte & env->a20_mask;
869
    }
870

    
871
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
872
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
873
    return paddr;
874
}
875
#endif /* !CONFIG_USER_ONLY */
876

    
877
#if defined(USE_CODE_COPY)
878
struct fpstate {
879
    uint16_t fpuc;
880
    uint16_t dummy1;
881
    uint16_t fpus;
882
    uint16_t dummy2;
883
    uint16_t fptag;
884
    uint16_t dummy3;
885

    
886
    uint32_t fpip;
887
    uint32_t fpcs;
888
    uint32_t fpoo;
889
    uint32_t fpos;
890
    uint8_t fpregs1[8 * 10];
891
};
892

    
893
void restore_native_fp_state(CPUState *env)
894
{
895
    int fptag, i, j;
896
    struct fpstate fp1, *fp = &fp1;
897
    
898
    fp->fpuc = env->fpuc;
899
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
900
    fptag = 0;
901
    for (i=7; i>=0; i--) {
902
        fptag <<= 2;
903
        if (env->fptags[i]) {
904
            fptag |= 3;
905
        } else {
906
            /* the FPU automatically computes it */
907
        }
908
    }
909
    fp->fptag = fptag;
910
    j = env->fpstt;
911
    for(i = 0;i < 8; i++) {
912
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
913
        j = (j + 1) & 7;
914
    }
915
    asm volatile ("frstor %0" : "=m" (*fp));
916
    env->native_fp_regs = 1;
917
}
918
 
919
void save_native_fp_state(CPUState *env)
920
{
921
    int fptag, i, j;
922
    uint16_t fpuc;
923
    struct fpstate fp1, *fp = &fp1;
924

    
925
    asm volatile ("fsave %0" : : "m" (*fp));
926
    env->fpuc = fp->fpuc;
927
    env->fpstt = (fp->fpus >> 11) & 7;
928
    env->fpus = fp->fpus & ~0x3800;
929
    fptag = fp->fptag;
930
    for(i = 0;i < 8; i++) {
931
        env->fptags[i] = ((fptag & 3) == 3);
932
        fptag >>= 2;
933
    }
934
    j = env->fpstt;
935
    for(i = 0;i < 8; i++) {
936
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
937
        j = (j + 1) & 7;
938
    }
939
    /* we must restore the default rounding state */
940
    /* XXX: we do not restore the exception state */
941
    fpuc = 0x037f | (env->fpuc & (3 << 10));
942
    asm volatile("fldcw %0" : : "m" (fpuc));
943
    env->native_fp_regs = 0;
944
}
945
#endif