Statistics
| Branch: | Revision:

root / target-i386 / helper2.c @ 6f15b608

History | View | Annotate | Download (32.5 kB)

1
/*
2
 *  i386 helpers (without register variable usage)
3
 * 
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include <stdarg.h>
21
#include <stdlib.h>
22
#include <stdio.h>
23
#include <string.h>
24
#include <inttypes.h>
25
#include <signal.h>
26
#include <assert.h>
27

    
28
#include "cpu.h"
29
#include "exec-all.h"
30

    
31
//#define DEBUG_MMU
32

    
33
#ifdef USE_CODE_COPY
34
#include <asm/ldt.h>
35
#include <linux/unistd.h>
36
#include <linux/version.h>
37

    
38
/* Thin wrapper invoking the raw modify_ldt system call (no glibc
   prototype is exported for it), used below to install an LDT data
   segment covering the CPU state. */
int modify_ldt(int func, void *ptr, unsigned long bytecount)
{
    long ret = syscall(__NR_modify_ldt, func, ptr, bytecount);
    return ret;
}
42

    
43
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
44
#define modify_ldt_ldt_s user_desc
45
#endif
46
#endif /* USE_CODE_COPY */
47

    
48
/* Allocate and initialize a new x86 CPU state.
   Returns the zero-initialized, reset CPU on success, NULL if
   allocation fails. Also performs one-time global init (flag
   optimization tables) on the first call, and fills in the CPUID
   identification/feature words exposed to the guest. */
CPUX86State *cpu_x86_init(void)
{
    CPUX86State *env;
    static int inited;   /* guards the one-time global initialization */

    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
    cpu_exec_init(env);

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
#ifdef USE_CODE_COPY
    /* testing code for code copy case */
    {
        struct modify_ldt_ldt_s ldt;

        /* map the CPU state through an LDT data segment so generated
           code can reach it via %fs */
        ldt.entry_number = 1;
        ldt.base_addr = (unsigned long)env;
        ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;  /* limit in 4K pages, rounded up */
        ldt.seg_32bit = 1;
        ldt.contents = MODIFY_LDT_CONTENTS_DATA;
        ldt.read_exec_only = 0;
        ldt.limit_in_pages = 1;
        ldt.seg_not_present = 0;
        ldt.useable = 1;
        modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */

        /* selector: index 1, TI=1 (LDT), RPL=3 */
        asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
    }
#endif
    {
        int family, model, stepping;
#ifdef TARGET_X86_64
        /* 64-bit target identifies as an AMD part */
        env->cpuid_vendor1 = 0x68747541; /* "Auth" */
        env->cpuid_vendor2 = 0x69746e65; /* "enti" */
        env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */
        family = 6;
        model = 2;
        stepping = 3;
#else
        env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
        env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
        env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
#if 0
        /* pentium 75-200 */
        family = 5;
        model = 2;
        stepping = 11;
#else
        /* pentium pro */
        family = 6;
        model = 3;
        stepping = 3;
#endif
#endif
        env->cpuid_level = 2;
        /* CPUID.1 EAX signature: family/model/stepping packed fields */
        env->cpuid_version = (family << 8) | (model << 4) | stepping;
        env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
                               CPUID_TSC | CPUID_MSR | CPUID_MCE |
                               CPUID_CX8 | CPUID_PGE | CPUID_CMOV |
                               CPUID_PAT);
        env->pat = 0x0007040600070406ULL;
        env->cpuid_ext_features = CPUID_EXT_SSE3;
        env->cpuid_features |= CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | CPUID_PAE | CPUID_SEP;
        env->cpuid_features |= CPUID_APIC;
        env->cpuid_xlevel = 0;  /* no extended CPUID leaves unless x86_64 below */
        {
            /* pack the model-id string into the 48-byte cpuid_model
               array, 4 chars per 32-bit word, NUL-padded */
            const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION;
            int c, len, i;
            len = strlen(model_id);
            for(i = 0; i < 48; i++) {
                if (i >= len)
                    c = '\0';
                else
                    c = model_id[i];
                env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
            }
        }
#ifdef TARGET_X86_64
        /* currently not enabled for std i386 because not fully tested */
        /* mirror the common feature bits into the 0x80000001 leaf */
        env->cpuid_ext2_features = (env->cpuid_features & 0x0183F3FF);
        env->cpuid_ext2_features |= CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX;
        env->cpuid_xlevel = 0x80000008;

        /* these features are needed for Win64 and aren't fully implemented */
        env->cpuid_features |= CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA;
#endif
    }
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    return env;
}
146

    
147
/* NOTE: must be called outside the CPU execute loop */
/* Put the CPU into its architectural power-on/reset state. */
void cpu_reset(CPUX86State *env)
{
    int i;

    /* clear only up to 'breakpoints' so debugger state and the fields
       laid out after it survive a reset */
    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif

    /* x86 reset value of CR0 (ET set, paging/protection off) */
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = 0xffffffff;   /* A20 gate enabled */
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK;
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK;

    /* CS base 0xffff0000 + EIP 0xfff0 = reset vector 0xfffffff0 */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0); 
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);

    env->eip = 0xfff0;
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */

    env->eflags = 0x2;   /* only the always-set reserved bit 1 */

    /* FPU init: all x87 stack slots marked empty, default control word */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;   /* SSE default: all exceptions masked */
}
192

    
193
/* Release a CPU state previously created by cpu_x86_init().
   The state is allocated with qemu_mallocz(), so it must be released
   with the matching qemu_free() rather than the bare libc free(). */
void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}
197

    
198
/***********************************************************/
199
/* x86 debug */
200

    
201
/* Human-readable names for the lazy condition-code operations, indexed
   by env->cc_op and used only by cpu_dump_state() below.
   NOTE(review): the entry order must mirror the CC_OP_* enum (see
   cpu.h) — verify when adding new operations. */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
255

    
256
/* Dump the CPU registers and control state through 'cpu_fprintf'.
   'flags' selects optional sections: X86_DUMP_CCOP adds the lazy
   condition-code state, X86_DUMP_FPU adds x87/SSE registers.
   Chooses 64-bit vs 32-bit formatting from the current CS/LMA mode. */
void cpu_dump_state(CPUState *env, FILE *f, 
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    /* 64-bit code segment: print full 64-bit GPRs including r8-r15 */
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, 
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX], 
                    env->regs[R_EBX], 
                    env->regs[R_ECX], 
                    env->regs[R_EDX], 
                    env->regs[R_ESI], 
                    env->regs[R_EDI], 
                    env->regs[R_EBP], 
                    env->regs[R_ESP], 
                    env->regs[8], 
                    env->regs[9], 
                    env->regs[10], 
                    env->regs[11], 
                    env->regs[12], 
                    env->regs[13], 
                    env->regs[14], 
                    env->regs[15], 
                    env->eip, eflags,
                    /* one flag letter per EFLAGS bit, '-' when clear */
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK, 
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    } else 
#endif
    {
        /* legacy/compatibility mode: truncate registers to 32 bits */
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX], 
                    (uint32_t)env->regs[R_EBX], 
                    (uint32_t)env->regs[R_ECX], 
                    (uint32_t)env->regs[R_EDX], 
                    (uint32_t)env->regs[R_ESI], 
                    (uint32_t)env->regs[R_EDI], 
                    (uint32_t)env->regs[R_EBP], 
                    (uint32_t)env->regs[R_ESP], 
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK, 
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    }

#ifdef TARGET_X86_64
    /* segment and system descriptor state, 64-bit bases in long mode */
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0], 
                    env->cr[2], 
                    env->cr[3], 
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        /* same information with 32-bit bases */
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0], 
                    (uint32_t)env->cr[2], 
                    (uint32_t)env->cr[3], 
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        /* lazy condition-code state: resolve the cc_op index to a name,
           falling back to the raw value when out of range */
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst, 
                        cc_op_name);
        } else 
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst, 
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        /* build the abridged FTW byte: bit i set when ST(i) is valid
           (env->fptags uses the inverse convention, 1 == empty) */
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    /* fold the current stack top into FSW bits 11-13 */
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            /* reinterpret the 80-bit long double as mantissa+exponent
               words for raw hex display */
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            /* two registers per output line */
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        /* 16 XMM registers are architecturally visible only in 64-bit mode */
        if (env->hflags & HF_CS64_MASK) 
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i, 
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}
462

    
463
/***********************************************************/
464
/* x86 mmu */
465
/* XXX: add PGE support */
466

    
467
/* Set the state of the A20 gate. Any non-zero 'a20_state' enables the
   gate; a change invalidates all cached translations. */
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    int new_state = (a20_state != 0);

    /* nothing to do when the gate already has the requested state */
    if (new_state == ((env->a20_mask >> 20) & 1))
        return;

#if defined(DEBUG_MMU)
    printf("A20 update: a20=%d\n", new_state);
#endif
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

    /* when a20 is changed, all the MMU mappings are invalid, so
       we must flush everything */
    tlb_flush(env, 1);
    env->a20_mask = 0xffefffff | (new_state << 20);
}
484

    
485
/* Install a new CR0 value, keeping the derived state consistent:
   flushes the TLB when translation-relevant bits change, handles long
   mode entry/exit (TARGET_X86_64), and refreshes the hflags bits that
   cache PE/ADDSEG and the FPU-related MP/EM/TS bits. */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* a change of PG, WP or PE invalidates all cached translations */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    /* enabling paging with EFER.LME set activates long mode */
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    /* ET is hardwired to 1 */
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    /* CR0 bits MP/EM/TS (bits 1-3) map onto consecutive hflags bits
       starting at HF_MP_SHIFT, hence the single shift-by-(HF_MP_SHIFT-1) */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
525

    
526
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
527
   the PDPT */
528
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
529
{
530
    env->cr[3] = new_cr3;
531
    if (env->cr[0] & CR0_PG_MASK) {
532
#if defined(DEBUG_MMU)
533
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
534
#endif
535
        tlb_flush(env, 0);
536
    }
537
}
538

    
539
/* Install a new CR4 value: flushes the TLB when paging-related bits
   (PGE/PAE/PSE) change, and keeps the cached OSFXSR hflag in sync.
   OSFXSR is forcibly cleared when the CPU does not advertise SSE. */
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    /* bug fix: log the value being installed, not the stale env->cr[4] */
    printf("CR4 update: CR4=%08x\n", new_cr4);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}
558

    
559
/* XXX: also flush 4MB pages */
/* Invalidate the cached translation for the single page containing
   'addr' (INVLPG semantics). */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env, addr);
}
564

    
565
#if defined(CONFIG_USER_ONLY) 
566

    
567
/* User-mode-only variant: there is no MMU to walk, so every fault is
   reported as a user-mode page fault (#PF). Always returns 1 so the
   caller raises the exception. */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, 
                             int is_write, int is_user, int is_softmmu)
{
    /* user mode only emulation */
    int error_code = ((is_write & 1) << PG_ERROR_W_BIT) | PG_ERROR_U_MASK;

    env->cr[2] = addr;
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
578

    
579
/* User-mode emulation has no guest MMU: virtual and "physical"
   addresses are identical. */
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
583

    
584
#else
585

    
586
#define PHYS_ADDR_MASK 0xfffff000
587

    
588
/* return value:
   -1 = cannot handle fault 
   0  = nothing more to do 
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
/* Walk the guest page tables for 'addr' and install the resulting
   translation in the TLB, updating accessed/dirty bits as a real CPU
   would. Handles three paging modes: no paging, legacy 2-level 32-bit,
   and PAE (with the 4-level long-mode walk under TARGET_X86_64).
   is_write1: 0 = read, 1 = write, 2 = instruction fetch (NX check).
   On fault, CR2/error_code/exception_index are set and 1 is returned. */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, 
                             int is_write1, int is_user, int is_softmmu)
{
    uint64_t ptep, pte;
    uint32_t pdpe_addr, pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write;
    unsigned long paddr, page_offset;
    target_ulong vaddr, virt_addr;

#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n", 
           addr, is_write1, is_user, env->eip);
#endif
    /* fetches (is_write1 == 2) count as reads for the dirty/W checks */
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: identity mapping with full permissions */
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;

        /* 'ptep' accumulates the combined permissions of every level.
           Each entry is XORed with PG_NX_MASK before ANDing so that NX
           (restrictive when set) combines the same way as USER/RW
           (permissive when set). */
        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr;
            uint64_t pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            /* non-canonical addresses raise #GP, not #PF */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) & 
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* NX set while EFER.NXE is off is a reserved-bit violation */
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                /* NOTE(review): stl_phys_notdirty writes only 32 bits;
                   the A/D bits live in the low dword — confirm intended */
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) & 
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* legacy PAE: 3-level walk; PDPT entries carry no
               permission bits, so seed ptep fully permissive */
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) & 
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            /* undo the inversion: from here on ptep's NX bit has its
               real (set == no-execute) meaning */
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                /* supervisor writes honour RW only when CR0.WP is set */
                if ((env->cr[0] & CR0_WP_MASK) && 
                    is_write && !(ptep & PG_RW_MASK)) 
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff); 
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect; 
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) 
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        /* legacy 32-bit 2-level walk */
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & 
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) && 
                    is_write && !(pde & PG_RW_MASK)) 
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & 
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) 
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    /* build the #PF error code and latch the faulting address in CR2 */
    env->cr[2] = addr;
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    /* instruction-fetch bit is only reported when NX paging is active */
    if (is_write1 == 2 && 
        (env->efer & MSR_EFER_NXE) && 
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
876

    
877
/* Translate a guest virtual address to a guest physical address for the
   debugger (gdbstub/monitor).  Walks the page tables read-only: no
   accessed/dirty bits are set, no fault is raised.  Returns the physical
   address, or -1 if the address is not mapped.
   NOTE(review): the PAE branch redeclares pde_addr/pte_addr, shadowing
   the outer declarations; harmless but worth cleaning up.  Physical
   addresses are truncated to 32 bits throughout (see XXX below).  */
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t pde_addr, pte_addr;
    uint32_t pde, pte, paddr, page_offset, page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        uint32_t pdpe_addr, pde_addr, pte_addr;
        uint32_t pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            /* long mode: 4-level walk PML4 -> PDP -> PD -> PT */
            uint32_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension: bits 63..47 must all
               equal bit 47, otherwise the address is non-canonical */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;
            
            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) & 
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;
            
            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) & 
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else 
#endif
        {
            /* PAE without long mode: CR3 points to the 32-byte-aligned
               page-directory-pointer table (4 entries) */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) & 
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        /* PAE page directory: 512 8-byte entries indexed by addr[29:21] */
        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            /* clear addr bits 20..12 but keep the low flag bits; they are
               stripped later by TARGET_PAGE_MASK */
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page: page table has 512 8-byte entries, addr[20:12] */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldl_phys(pte_addr);
        }
    } else {
        if (!(env->cr[0] & CR0_PG_MASK)) {
            /* paging disabled: identity mapping */
            pte = addr;
            page_size = 4096;
        } else {
            /* legacy 32-bit paging: page directory entry, addr[31:22] */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK)) 
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                /* 4 MB page: clear addr bits 21..12, keep low flag bits */
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry, addr[21:12] (4-byte entries) */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    /* compose the physical address from the frame base plus the in-page
       offset (the offset above 4 KB survives via page_offset, since only
       one 4 KB page of a large page is ever reported) */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
964
#endif /* !CONFIG_USER_ONLY */
965

    
966
#if defined(USE_CODE_COPY)
967
/* In-memory image produced/consumed by the x87 FSAVE/FRSTOR instructions
   in 32-bit protected mode (94-byte layout, fields padded to 32 bits).
   The dummyN members are the pad words of the hardware format.  */
struct fpstate {
    uint16_t fpuc;       /* FPU control word */
    uint16_t dummy1;
    uint16_t fpus;       /* FPU status word (includes TOP in bits 13..11) */
    uint16_t dummy2;
    uint16_t fptag;      /* full 16-bit tag word, 2 bits per register */
    uint16_t dummy3;

    uint32_t fpip;       /* FPU instruction pointer offset */
    uint32_t fpcs;       /* FPU instruction pointer selector */
    uint32_t fpoo;       /* FPU operand pointer offset */
    uint32_t fpos;       /* FPU operand pointer selector */
    uint8_t fpregs1[8 * 10];  /* ST(0)..ST(7), 10 bytes (80 bits) each */
};
981

    
982
/* Load the emulated FPU state (env->fpuc/fpus/fpstt/fptags/fpregs) into
   the host x87 unit via FRSTOR, so translated code can use the native
   FPU directly.  Sets env->native_fp_regs to flag that the live state
   now resides in hardware (save_native_fp_state is the inverse).  */
void restore_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;
    
    fp->fpuc = env->fpuc;
    /* merge the emulated stack top (fpstt) back into status-word TOP */
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;  /* 11b = empty register */
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    /* registers are stored in stack order: ST(0) first, starting at TOP */
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    /* FRSTOR *reads* the memory image, so it must be an input ("m")
       operand.  The previous "=m" output constraint told the compiler
       the asm writes *fp, allowing it to discard the stores above.  */
    asm volatile ("frstor %0" : : "m" (*fp));
    env->native_fp_regs = 1;
}
1007
 
1008
/* Capture the host x87 state via FSAVE back into the emulated FPU
   fields (env->fpuc/fpus/fpstt/fptags/fpregs), then reset the hardware
   control word to the default rounding state so host code is unaffected.
   Clears env->native_fp_regs (inverse of restore_native_fp_state).  */
void save_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    /* FSAVE *writes* the memory image (and reinitializes the FPU), so
       *fp must be an output ("=m") operand.  The previous input-only
       "m" constraint let the compiler assume *fp stayed uninitialized,
       making every read below potentially bogus.  */
    asm volatile ("fsave %0" : "=m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;   /* extract TOP */
    env->fpus = fp->fpus & ~0x3800;      /* status word without TOP */
    fptag = fp->fptag;
    for(i = 0;i < 8; i++) {
        /* collapse the 2-bit hardware tags to a per-register empty flag */
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    /* registers come back in stack order, ST(0) first at TOP */
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    /* XXX: we do not restore the exception state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
    env->native_fp_regs = 0;
}
1034
#endif