Statistics
| Branch: | Revision:

root / target-i386 / helper2.c @ 80210bcd

History | View | Annotate | Download (32.9 kB)

1
/*
2
 *  i386 helpers (without register variable usage)
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include <stdarg.h>
21
#include <stdlib.h>
22
#include <stdio.h>
23
#include <string.h>
24
#include <inttypes.h>
25
#include <signal.h>
26
#include <assert.h>
27

    
28
#include "cpu.h"
29
#include "exec-all.h"
30
#include "svm.h"
31

    
32
//#define DEBUG_MMU
33

    
34
#ifdef USE_CODE_COPY
35
#include <unistd.h>
36
#include <asm/ldt.h>
37
#include <linux/unistd.h>
38
#include <linux/version.h>
39

    
40
int modify_ldt(int func, void *ptr, unsigned long bytecount)
41
{
42
        return syscall(__NR_modify_ldt, func, ptr, bytecount);
43
}
44

    
45
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
46
#define modify_ldt_ldt_s user_desc
47
#endif
48
#endif /* USE_CODE_COPY */
49

    
50
/* Allocate and initialize a fresh x86 CPU state.
 *
 * Returns the new CPUX86State, or NULL if allocation fails.  The first
 * call also initializes translator-global tables (guarded by 'inited').
 * The caller owns the returned state; release it with cpu_x86_close().
 */
CPUX86State *cpu_x86_init(void)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
    cpu_exec_init(env);

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
#ifdef USE_CODE_COPY
    /* testing code for code copy case */
    {
        struct modify_ldt_ldt_s ldt;

        /* Install an LDT data segment covering 'env' so that generated
           code can reach the CPU state through %fs. */
        ldt.entry_number = 1;
        ldt.base_addr = (unsigned long)env;
        ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
        ldt.seg_32bit = 1;
        ldt.contents = MODIFY_LDT_CONTENTS_DATA;
        ldt.read_exec_only = 0;
        ldt.limit_in_pages = 1;
        ldt.seg_not_present = 0;
        ldt.useable = 1;
        modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */

        /* Selector 0x0f: LDT entry 1, table indicator set, RPL 3. */
        asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
    }
#endif
    {
        int family, model, stepping;
#ifdef TARGET_X86_64
        /* The 64-bit target reports an AMD vendor id. */
        env->cpuid_vendor1 = 0x68747541; /* "Auth" */
        env->cpuid_vendor2 = 0x69746e65; /* "enti" */
        env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */
        family = 6;
        model = 2;
        stepping = 3;
#else
        env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
        env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
        env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
#if 0
        /* pentium 75-200 */
        family = 5;
        model = 2;
        stepping = 11;
#else
        /* pentium pro */
        family = 6;
        model = 3;
        stepping = 3;
#endif
#endif
        env->cpuid_level = 2;
        /* CPUID leaf 1 EAX layout: family in bits 11-8, model 7-4,
           stepping 3-0. */
        env->cpuid_version = (family << 8) | (model << 4) | stepping;
        env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
                               CPUID_TSC | CPUID_MSR | CPUID_MCE |
                               CPUID_CX8 | CPUID_PGE | CPUID_CMOV |
                               CPUID_PAT);
        env->pat = 0x0007040600070406ULL;
        env->cpuid_ext3_features = CPUID_EXT3_SVM;
        env->cpuid_ext_features = CPUID_EXT_SSE3;
        env->cpuid_features |= CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | CPUID_PAE | CPUID_SEP;
        env->cpuid_features |= CPUID_APIC;
        env->cpuid_xlevel = 0x8000000e;
        {
            const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION;
            int c, len, i;
            /* Pack the 48-byte brand string, NUL padded, into the twelve
               32-bit cpuid_model words (little-endian byte order). */
            len = strlen(model_id);
            for(i = 0; i < 48; i++) {
                if (i >= len)
                    c = '\0';
                else
                    c = model_id[i];
                env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
            }
        }
#ifdef TARGET_X86_64
        /* currently not enabled for std i386 because not fully tested */
        env->cpuid_ext2_features = (env->cpuid_features & 0x0183F3FF);
        env->cpuid_ext2_features |= CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX;

        /* these features are needed for Win64 and aren't fully implemented */
        env->cpuid_features |= CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA;
        /* this feature is needed for Solaris and isn't fully implemented */
        env->cpuid_features |= CPUID_PSE36;
#endif
    }
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    return env;
}
150

    
151
/* NOTE: must be called outside the CPU execute loop */
152
void cpu_reset(CPUX86State *env)
153
{
154
    int i;
155

    
156
    memset(env, 0, offsetof(CPUX86State, breakpoints));
157

    
158
    tlb_flush(env, 1);
159

    
160
    env->old_exception = -1;
161

    
162
    /* init to reset state */
163

    
164
#ifdef CONFIG_SOFTMMU
165
    env->hflags |= HF_SOFTMMU_MASK;
166
#endif
167
    env->hflags |= HF_GIF_MASK;
168

    
169
    cpu_x86_update_cr0(env, 0x60000010);
170
    env->a20_mask = 0xffffffff;
171
    env->smbase = 0x30000;
172

    
173
    env->idt.limit = 0xffff;
174
    env->gdt.limit = 0xffff;
175
    env->ldt.limit = 0xffff;
176
    env->ldt.flags = DESC_P_MASK;
177
    env->tr.limit = 0xffff;
178
    env->tr.flags = DESC_P_MASK;
179

    
180
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
181
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
182
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
183
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
184
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
185
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);
186

    
187
    env->eip = 0xfff0;
188
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */
189

    
190
    env->eflags = 0x2;
191

    
192
    /* FPU init */
193
    for(i = 0;i < 8; i++)
194
        env->fptags[i] = 1;
195
    env->fpuc = 0x37f;
196

    
197
    env->mxcsr = 0x1f80;
198
}
199

    
200
/* Release a CPU state previously created by cpu_x86_init().
   NOTE(review): the state was allocated with qemu_mallocz() but freed
   with plain free() -- this relies on qemu_mallocz being a malloc
   wrapper; verify, or use the matching qemu_free() if available. */
void cpu_x86_close(CPUX86State *env)
{
    free(env);
}
204

    
205
/***********************************************************/
206
/* x86 debug */
207

    
208
/* Textual names of the lazy condition-code states, indexed by
   env->cc_op (see the X86_DUMP_CCOP path in cpu_dump_state).  The
   order must therefore match the CC_OP_* enumeration in cpu.h; each
   arithmetic group lists its B/W/L/Q operand-width variants. */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
262

    
263
/* Dump the CPU state to 'f' through 'cpu_fprintf' (registers, segments,
 * control registers; optionally condition-code state and FPU/SSE state
 * when X86_DUMP_CCOP / X86_DUMP_FPU are set in 'flags').  The 64-bit
 * register/segment formats are selected from hflags (CS64/LMA), not
 * from 'flags'.
 */
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
    /* General-purpose registers, RIP/EIP and decoded flag letters. */
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    }

    /* Segments, descriptor tables and control registers; segment bases
       are 64-bit wide in long mode. */
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        /* Print the lazy condition-code state; out-of-range cc_op values
           are shown numerically as "[n]". */
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        /* Condense the per-register tags into an FXSAVE-style byte:
           bit i set means st(i) is valid (fptags[i] == 0 means valid). */
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        /* FSW is reconstructed with the TOP field (bits 13-11) taken
           from fpstt. */
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            /* Reinterpret the 80-bit long double as mantissa (lower)
               plus sign/exponent (upper) via a union. */
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            /* Two registers per output line. */
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        /* 16 XMM registers in 64-bit mode, 8 otherwise. */
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}
469

    
470
/***********************************************************/
471
/* x86 mmu */
472
/* XXX: add PGE support */
473

    
474
/* Drive the A20 gate.  'a20_state' is treated as a boolean; bit 20 of
   env->a20_mask caches the current line level. */
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    int new_state = (a20_state != 0);

    /* Nothing to do when the line already has the requested level. */
    if (new_state == ((env->a20_mask >> 20) & 1))
        return;

#if defined(DEBUG_MMU)
    printf("A20 update: a20=%d\n", new_state);
#endif
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

    /* when a20 is changed, all the MMU mappings are invalid, so
       we must flush everything */
    tlb_flush(env, 1);
    env->a20_mask = 0xffefffff | (new_state << 20);
}
491

    
492
/* Install a new CR0 value: flush the TLB when a paging-related bit
 * changes, handle long-mode entry/exit on x86-64 targets, and
 * recompute the hflags bits that cache CR0 state.
 */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* Any change to PG, WP or PE invalidates all cached translations. */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    /* ET is forced on in the stored value. */
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    /* Shifts CR0's MP/EM/TS bits into the corresponding hflag positions
       in one move (assumes HF_MP/EM/TS are contiguous -- see cpu.h). */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
532

    
533
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
534
   the PDPT */
535
/* Install a new CR3 (page-directory base).  With paging enabled the
   non-global TLB entries must be dropped. */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    /* With paging disabled there are no cached translations to drop. */
    if (!(env->cr[0] & CR0_PG_MASK))
        return;
#if defined(DEBUG_MMU)
    printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
    tlb_flush(env, 0);
}
545

    
546
/* Install a new CR4 value: flush the TLB when a paging-related bit
 * (PGE/PAE/PSE) changes, mask OSFXSR if the CPU does not advertise SSE,
 * and mirror OSFXSR into the cached hflags.
 *
 * Bug fix: the DEBUG_MMU trace previously printed the stale
 * env->cr[4] under the label "CR4 update", unlike the CR0/CR3 traces
 * which print the incoming value; it now logs new_cr4.
 */
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", new_cr4);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}
565

    
566
/* XXX: also flush 4MB pages */
567
/* Invalidate the cached translation of a single guest page. */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env, addr);
}
571

    
572
#if defined(CONFIG_USER_ONLY)
573

    
574
/* User-mode-only MMU fault handler: there is no page table to walk, so
   every fault is raised to the caller as a page fault (#PF) from user
   privilege.  Always returns 1 ("generate PF fault"). */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    env->cr[2] = addr;
    /* Error code: write bit from the low bit of is_write, plus the
       user-access bit. */
    env->error_code = ((is_write & 1) << PG_ERROR_W_BIT) | PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
585

    
586
/* Debug accessor: with user-mode emulation the guest address space is
   identity-mapped, so the "physical" address is the address itself. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
590

    
591
#else
592

    
593
#define PHYS_ADDR_MASK 0xfffff000
594

    
595
/* return value:
596
   -1 = cannot handle fault
597
   0  = nothing more to do
598
   1  = generate PF fault
599
   2  = soft MMU activation required for this block
600
*/
601
/* Softmmu MMU fault handler: walks the guest page tables for 'addr'
 * (no paging / PAE / long-mode / legacy 32-bit walks), updates
 * accessed/dirty bits, and installs the translation in the TLB.
 * is_write1: 0 = read, 1 = write, 2 = instruction fetch.
 * Returns -1/0/1/2 as documented in the comment above this function.
 */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    /* NOTE(review): table-entry addresses are held in uint32_t -- only
       32-bit physical addresses are supported (see XXX below). */
    uint32_t pdpe_addr, pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    unsigned long paddr, page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    /* Collapse "instruction fetch" (2) to a read for the W error bit. */
    is_write = is_write1 & 1;

    /* Paging disabled: identity mapping with full access rights. */
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr;
            uint64_t pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                /* Non-canonical address: #GP, not #PF. */
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* NX set while EFER.NXE is clear is a reserved-bit fault. */
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                /* NOTE(review): stl_phys_notdirty writes only 32 bits of
                   this 64-bit entry; the A/D bits live in the low dword,
                   but the high dword is not written back -- verify. */
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            /* Accumulate protections; NX is inverted so that '&' across
               levels behaves like the architectural OR of NX bits. */
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* Legacy PAE: 4-entry PDPT selected by address bits 31-30. */
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* No upper levels: start with fully-permissive protections. */
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            /* Undo the NX inversion now that the walk terminates here. */
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                /* Supervisor writes honor R/W only when CR0.WP is set. */
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        /* Legacy 32-bit two-level walk (no PAE). */
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    /* Instruction-fetch bit only when NX is architecturally enabled. */
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    /* Under SVM, an intercepted #PF records the faulting address in the
       VMCB instead of CR2. */
    if (INTERCEPTEDl(_exceptions, 1 << EXCP0E_PAGE)) {
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    /* the VMM will handle this */
    if (INTERCEPTEDl(_exceptions, 1 << EXCP0E_PAGE))
        return 2;
    return 1;
}
891

    
892
/* Translate a guest virtual address into a guest physical address for
   debugger accesses (e.g. gdbstub memory reads).  Unlike the MMU fault
   path, this walk never sets accessed/dirty bits, ignores access
   permissions and raises no exception; it returns -1 when no valid
   mapping exists for 'addr'.  */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t pde_addr, pte_addr;
    uint32_t pde, pte, paddr, page_offset, page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        uint32_t pdpe_addr, pde_addr, pte_addr;
        uint32_t pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            /* 4-level walk: PML4 entry selected by addr bits 47:39 */
            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            /* PDPT entry selected by addr bits 38:30 */
            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            /* 32-bit PAE: CR3 holds the 32-byte aligned PDPT base;
               addr bits 31:30 select one of the 4 PDPT entries
               ((addr >> 27) & 0x18 is that index scaled by 8).  */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        /* page directory entry selected by addr bits 29:21 */
        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldl_phys(pte_addr);
        }
    } else {
        if (!(env->cr[0] & CR0_PG_MASK)) {
            /* paging disabled: physical address equals virtual address */
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                /* 4 MB page (PSE) */
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    /* for large pages, keep the offset bits above the 4KB boundary */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif /* !CONFIG_USER_ONLY */

#if defined(USE_CODE_COPY)
/* In-memory image produced/consumed by the x87 "fsave"/"frstor"
   instructions (32-bit protected-mode format): the 16-bit control,
   status and tag words are each padded to 32 bits, followed by the
   instruction/operand pointers and the eight 80-bit data registers.
   Field order and sizes must match the hardware layout exactly.  */
struct fpstate {
    uint16_t fpuc;   /* FPU control word */
    uint16_t dummy1;
    uint16_t fpus;   /* FPU status word (TOP field in bits 11-13) */
    uint16_t dummy2;
    uint16_t fptag;  /* tag word: 2 bits per register, 11b == empty */
    uint16_t dummy3;

    uint32_t fpip;   /* FPU instruction pointer offset */
    uint32_t fpcs;   /* FPU instruction pointer selector */
    uint32_t fpoo;   /* FPU operand pointer offset */
    uint32_t fpos;   /* FPU operand pointer selector */
    uint8_t fpregs1[8 * 10]; /* ST(0)..ST(7), 80 bits (10 bytes) each */
};
/* Load the emulated x87 state from 'env' into the host FPU so that
 * generated code can use the native FPU registers directly.
 *
 * The emulated register file (fpregs[], rotated by the emulated TOP
 * pointer fpstt) is linearized into an fsave-format image, the tag
 * word is rebuilt (empty registers tagged 11b; non-empty ones are
 * tagged 00b and recomputed by the FPU itself), and the image is
 * loaded with "frstor".  Sets env->native_fp_regs so the state is
 * saved back by save_native_fp_state() later.
 */
void restore_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    /* merge the emulated TOP field into bits 11-13 of the status word */
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3; /* empty register */
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    /* frstor *reads* its memory operand: it must be an input constraint
       ("m"), not an output ("=m") -- with "=m" the compiler is told the
       asm only writes *fp and may dead-store-eliminate the code above
       that builds the image.  */
    asm volatile ("frstor %0" : : "m" (*fp));
    env->native_fp_regs = 1;
}
/* Save the host FPU state back into the emulated 'env' after generated
 * code has used the native FPU registers.
 *
 * "fsave" dumps the FPU image (and reinitializes the FPU); the TOP
 * field, status word, tag bits and the eight 80-bit registers are then
 * unpacked into env.  The control word is reloaded with the default
 * rounding/precision state so host helper code runs with a sane FPU.
 * Clears env->native_fp_regs.
 */
void save_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    /* fsave *writes* its memory operand: it must be an output
       constraint ("=m"), not an input ("m") -- with an input-only
       constraint the compiler may assume fp1 is never modified and
       that the reads below use an uninitialized buffer.  */
    asm volatile ("fsave %0" : "=m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;  /* extract TOP field */
    env->fpus = fp->fpus & ~0x3800;     /* status word without TOP */
    fptag = fp->fptag;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3); /* 11b == empty register */
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    /* XXX: we do not restore the exception state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
    env->native_fp_regs = 0;
}
#endif