Statistics
| Branch: | Revision:

root / target-i386 / helper2.c @ a049de61

History | View | Annotate | Download (40.2 kB)

1
/*
2
 *  i386 helpers (without register variable usage)
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include <stdarg.h>
21
#include <stdlib.h>
22
#include <stdio.h>
23
#include <string.h>
24
#include <inttypes.h>
25
#include <signal.h>
26
#include <assert.h>
27

    
28
#include "cpu.h"
29
#include "exec-all.h"
30
#include "svm.h"
31

    
32
//#define DEBUG_MMU
33

    
34
#ifdef USE_CODE_COPY
35
#include <unistd.h>
36
#include <asm/ldt.h>
37
#include <linux/unistd.h>
38
#include <linux/version.h>
39

    
40
int modify_ldt(int func, void *ptr, unsigned long bytecount)
41
{
42
        return syscall(__NR_modify_ldt, func, ptr, bytecount);
43
}
44

    
45
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
46
#define modify_ldt_ldt_s user_desc
47
#endif
48
#endif /* USE_CODE_COPY */
49

    
50
/* CPU model selected by x86_find_cpu_by_name(); NULL until a model has been
 * successfully looked up.  cpu_x86_init() applies it via cpu_x86_register(). */
static struct x86_def_t *x86_cpu_def;
typedef struct x86_def_t x86_def_t;
static int cpu_x86_register (CPUX86State *env, const x86_def_t *def);
53

    
54
/* Set the bit for the named CPUID feature flag in the matching one of the
 * four output bitmaps (CPUID.1:EDX, CPUID.1:ECX, CPUID.80000001:EDX,
 * CPUID.80000001:ECX).  Exactly one bit in one bitmap is set per call; an
 * unknown name is reported on stderr and otherwise ignored.
 */
static void add_flagname_to_bitmaps(char *flagname, uint32_t *features, 
                                    uint32_t *ext_features, 
                                    uint32_t *ext2_features, 
                                    uint32_t *ext3_features)
{
    int i;
    /* feature flags taken from "Intel Processor Identification and the CPUID
     * Instruction" and AMD's "CPUID Specification". In cases of disagreement 
     * about feature names, the Linux name is used. */
    const char *feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
    };
    const char *ext_feature_name[] = {
       "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
       "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
       NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
       NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    const char *ext2_feature_name[] = {
       "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
       "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mttr", "pge", "mca", "cmov",
       "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
       "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
    };
    const char *ext3_feature_name[] = {
       "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
       "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
       NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
       NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };

    for ( i = 0 ; i < 32 ; i++ ) 
        if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
            *features |= 1 << i;
            return;
        }
    for ( i = 0 ; i < 32 ; i++ ) 
        if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
            *ext_features |= 1 << i;
            return;
        }
    for ( i = 0 ; i < 32 ; i++ ) 
        if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
            *ext2_features |= 1 << i;
            return;
        }
    for ( i = 0 ; i < 32 ; i++ ) 
        /* Bug fix: previously tested ext3_features[i] -- an out-of-bounds
         * read of the caller's output word -- instead of the name table,
         * so ext3 flags (svm, sse4a, ...) were matched unreliably. */
        if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
            *ext3_features |= 1 << i;
            return;
        }
    fprintf(stderr, "CPU feature %s not found\n", flagname);
}
110

    
111
/* Allocate and initialise a fresh x86 CPU state.
 * One-time global initialisation (condition-code optimisation tables) runs on
 * the first call only.  The CPU model applied is whatever
 * x86_find_cpu_by_name() last stored in x86_cpu_def.
 * NOTE(review): if x86_find_cpu_by_name() was never called (or failed),
 * x86_cpu_def is NULL and cpu_x86_register() will dereference it -- confirm
 * callers always select a model first.
 * Returns the new state, or NULL if allocation fails. */
CPUX86State *cpu_x86_init(void)
{
    CPUX86State *env;
    static int inited;  /* one-shot guard for the global table init below */

    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
    cpu_exec_init(env);

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
#ifdef USE_CODE_COPY
    /* testing code for code copy case */
    {
        struct modify_ldt_ldt_s ldt;

        /* Map the CPU state as a 32-bit data segment (LDT entry 1) so the
           generated code can reach it through %fs. */
        ldt.entry_number = 1;
        ldt.base_addr = (unsigned long)env;
        ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;  /* limit in 4K pages */
        ldt.seg_32bit = 1;
        ldt.contents = MODIFY_LDT_CONTENTS_DATA;
        ldt.read_exec_only = 0;
        ldt.limit_in_pages = 1;
        ldt.seg_not_present = 0;
        ldt.useable = 1;
        modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */

        /* load %fs with selector 0x0f: LDT entry 1, TI=1, RPL=3 */
        asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
    }
#endif
    cpu_x86_register(env, x86_cpu_def);
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    return env;
}
152

    
153
/* Description of one built-in CPU model; instances live in the x86_defs
 * table below and are selected by name via x86_find_cpu_by_name(). */
struct x86_def_t {
    const char *name;                   /* model name as given to -cpu */
    uint32_t vendor1, vendor2, vendor3; /* CPUID.0 vendor string (EBX/EDX/ECX);
                                           all zero means "GenuineIntel" default */
    int family;                         /* CPUID.1 version fields */
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features; /* CPUID feature bitmaps */
    uint32_t xlevel;                    /* highest extended CPUID leaf (0x8000xxxx) */
};
162

    
163
/* Baseline feature set of a PentiumPro-class CPU with SSE2 added. */
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
/* Built-in CPU models selectable with -cpu; looked up by name in
 * x86_find_cpu_by_name().  Feature words given as bare hex are raw
 * CPUID.1:EDX values; xlevel == 0 means no extended CPUID leaves. */
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .vendor1 = 0x68747541, /* "Auth" */
        .vendor2 = 0x69746e65, /* "enti" */
        .vendor3 = 0x444d4163, /* "cAMD" */
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | 
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | 
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x80000008,
    },
#endif
    {
        .name = "qemu32",
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3,
        .xlevel = 0,
    },
    {
        .name = "486",
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = 0x0000000B,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = 0x008001BF,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = 0x0183F9FF,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = 0x0383F9FF,
        .xlevel = 0,
    },
};
231

    
232
int x86_find_cpu_by_name(const unsigned char *cpu_model)
233
{
234
    int ret;
235
    unsigned int i;
236

    
237
    char *s = strdup(cpu_model);
238
    char *featurestr, *name = strtok(s, ",");
239
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
240
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
241
    int family = -1, model = -1, stepping = -1;
242

    
243
    ret = -1;
244
    x86_cpu_def = NULL;
245
    for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++) {
246
        if (strcmp(name, x86_defs[i].name) == 0) {
247
            x86_cpu_def = &x86_defs[i];
248
            ret = 0;
249
            break;
250
        }
251
    }
252
    if (!x86_cpu_def)
253
        goto error;
254

    
255
    featurestr = strtok(NULL, ",");
256

    
257
    while (featurestr) {
258
        char *val;
259
        if (featurestr[0] == '+') {
260
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
261
        } else if (featurestr[0] == '-') {
262
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
263
        } else if ((val = strchr(featurestr, '='))) {
264
            *val = 0; val++;
265
            if (!strcmp(featurestr, "family")) {
266
                char *err;
267
                family = strtol(val, &err, 10);
268
                if (!*val || *err || family < 0) {
269
                    fprintf(stderr, "bad numerical value %s\n", val);
270
                    x86_cpu_def = 0;
271
                    goto error;
272
                }
273
                x86_cpu_def->family = family;
274
            } else if (!strcmp(featurestr, "model")) {
275
                char *err;
276
                model = strtol(val, &err, 10);
277
                if (!*val || *err || model < 0 || model > 0xf) {
278
                    fprintf(stderr, "bad numerical value %s\n", val);
279
                    x86_cpu_def = 0;
280
                    goto error;
281
                }
282
                x86_cpu_def->model = model;
283
            } else if (!strcmp(featurestr, "stepping")) {
284
                char *err;
285
                stepping = strtol(val, &err, 10);
286
                if (!*val || *err || stepping < 0 || stepping > 0xf) {
287
                    fprintf(stderr, "bad numerical value %s\n", val);
288
                    x86_cpu_def = 0;
289
                    goto error;
290
                }
291
                x86_cpu_def->stepping = stepping;
292
            } else {
293
                fprintf(stderr, "unregnized feature %s\n", featurestr);
294
                x86_cpu_def = 0;
295
                goto error;
296
            }
297
        } else {
298
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
299
            x86_cpu_def = 0;
300
            goto error;
301
        }
302
        featurestr = strtok(NULL, ",");
303
    }
304
    x86_cpu_def->features |= plus_features;
305
    x86_cpu_def->ext_features |= plus_ext_features;
306
    x86_cpu_def->ext2_features |= plus_ext2_features;
307
    x86_cpu_def->ext3_features |= plus_ext3_features;
308
    x86_cpu_def->features &= ~minus_features;
309
    x86_cpu_def->ext_features &= ~minus_ext_features;
310
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
311
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
312

    
313
error:
314
    free(s);
315
    return ret;
316
}
317

    
318
void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
319
{
320
    unsigned int i;
321

    
322
    for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++)
323
        (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
324
}
325

    
326
/* Copy a CPU model description into the CPUID-related fields of env.
 * A model with no vendor string gets the "GenuineIntel" default.  The
 * model-id string is packed NUL-padded into the 48 bytes of cpuid_model[],
 * little-endian, four characters per 32-bit word.  Always returns 0. */
int cpu_x86_register (CPUX86State *env, const x86_def_t *def)
{
    const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION;
    int len, pos;

    if (!def->vendor1) {
        /* default vendor */
        env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
        env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
        env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
    } else {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    }
    env->cpuid_level = 2;
    env->cpuid_version = (def->family << 8) | (def->model << 4) | def->stepping;
    env->cpuid_features = def->features;
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_ext3_features = def->ext3_features;
    env->cpuid_xlevel = def->xlevel;
    env->pat = 0x0007040600070406ULL;

    len = strlen(model_id);
    for (pos = 0; pos < 48; pos++) {
        int ch = (pos < len) ? model_id[pos] : '\0';
        env->cpuid_model[pos >> 2] |= ch << (8 * (pos & 3));
    }
    return 0;
}
359

    
360
/* NOTE: must be called outside the CPU execute loop */
/* Put the CPU into its architectural power-on/reset state.  Everything up to
 * the 'breakpoints' field is zeroed; fields from 'breakpoints' onward
 * (debug state etc.) survive the reset. */
void cpu_reset(CPUX86State *env)
{
    int i;

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags |= HF_GIF_MASK;

    /* CR0 reset value: CD | NW | ET, paging and protection off */
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = 0xffffffff;   /* A20 gate enabled */
    env->smbase = 0x30000;        /* default SMRAM base */

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK;
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK;

    /* CS base 0xffff0000 with EIP 0xfff0 below puts the first fetch at the
       reset vector 0xfffffff0 */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;  /* EDX holds the CPU signature after reset */

    env->eflags = 0x2;  /* only the always-one reserved bit set */

    /* FPU init: all stack slots tagged empty, default control word */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;  /* SSE default: all exceptions masked */
}
408

    
409
/* Release a CPU state allocated by cpu_x86_init().
 * NOTE(review): only the env allocation itself is freed; whatever
 * cpu_exec_init() registered is not unwound here -- verify against callers. */
void cpu_x86_close(CPUX86State *env)
{
    free(env);
}
413

    
414
/***********************************************************/
415
/* x86 debug */
416

    
417
/* Printable names for the lazy condition-code operation codes, indexed by
 * env->cc_op.  The order must match the CC_OP_* enumeration in cpu.h
 * (used by cpu_dump_state() below). */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
471

    
472
/* Dump the full CPU state through the caller-supplied fprintf-like callback:
 * general registers, flags, segment/descriptor-table registers and control
 * registers; optionally (per 'flags') the lazy condition-code state
 * (X86_DUMP_CCOP) and the FPU/SSE state (X86_DUMP_FPU).  64-bit formats are
 * used when the CPU is in long/64-bit mode. */
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
    /* --- general registers, EIP/EFLAGS and mode bits --- */
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    }

    /* --- segment registers, descriptor tables and control registers --- */
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    /* --- lazy condition-code state (optional) --- */
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    /* --- FPU and SSE state (optional) --- */
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        /* rebuild the packed 8-bit tag word from the per-register tags */
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            /* split the 80-bit long double into mantissa and sign/exponent */
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        /* 16 XMM registers in 64-bit mode, 8 otherwise */
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}
678

    
679
/***********************************************************/
680
/* x86 mmu */
681
/* XXX: add PGE support */
682

    
683
/* Set the state of the A20 address-line gate (bit 20 of a20_mask).
 * A change invalidates all cached translations, so every TB is unlinked
 * and the whole TLB is flushed; a no-op change returns immediately. */
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    int new_state = (a20_state != 0);

    if (new_state == ((env->a20_mask >> 20) & 1))
        return;

#if defined(DEBUG_MMU)
    printf("A20 update: a20=%d\n", new_state);
#endif
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

    /* when a20 is changed, all the MMU mappings are invalid, so
       we must flush everything */
    tlb_flush(env, 1);
    env->a20_mask = 0xffefffff | (new_state << 20);
}
700

    
701
/* Install a new CR0 value and keep the derived state consistent:
 * flushes the TLB when any paging-related bit (PG/WP/PE) changes, handles
 * long-mode entry/exit on PG transitions when EFER.LME/LMA is involved, and
 * recomputes the cached PE/ADDSEG/MP/EM/TS bits in env->hflags. */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;   /* long mode requires PAE; CR0 left unchanged */
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;   /* RIP truncated to 32 bits on exit */
    }
#endif
    /* ET is hardwired to 1 */
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
741

    
742
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
743
   the PDPT */
744
/* Install a new page-table base.  With paging enabled the non-global TLB
 * entries are flushed; with paging off there is nothing cached to drop. */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (!(env->cr[0] & CR0_PG_MASK))
        return;
#if defined(DEBUG_MMU)
    printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
    tlb_flush(env, 0);
}
754

    
755
/* Install a new CR4 value: flush the TLB if any paging-related bit
 * (PGE/PAE/PSE) changes, force OSFXSR off on CPUs without SSE, and mirror
 * OSFXSR into the cached hflags. */
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
    const uint32_t paging_bits = CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK;

#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 ^ env->cr[4]) & paging_bits)
        tlb_flush(env, 1);

    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}
774

    
775
/* XXX: also flush 4MB pages */
776
/* Invalidate the cached translation for the single page containing addr
 * (the INVLPG operation); see the XXX above about large pages. */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env, addr);
}
780

    
781
#if defined(CONFIG_USER_ONLY)
782

    
783
/* MMU fault handler for user-mode-only emulation: there is no page table,
 * so every fault is reported to the guest as a user-level page fault on
 * addr.  mmu_idx and is_softmmu are unused here.  Always returns 1
 * ("raise the exception"). */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    env->cr[2] = addr;
    env->error_code = ((is_write & 1) << PG_ERROR_W_BIT) | PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
794

    
795
/* Debugger virtual->physical translation; in user-mode-only emulation the
 * mapping is the identity. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
799

    
800
#else
801

    
802
#define PHYS_ADDR_MASK 0xfffff000
803

    
804
/* return value:
805
   -1 = cannot handle fault
806
   0  = nothing more to do
807
   1  = generate PF fault
808
   2  = soft MMU activation required for this block
809
*/
810
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
811
                             int is_write1, int mmu_idx, int is_softmmu)
812
{
813
    uint64_t ptep, pte;
814
    uint32_t pdpe_addr, pde_addr, pte_addr;
815
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
816
    unsigned long paddr, page_offset;
817
    target_ulong vaddr, virt_addr;
818

    
819
    is_user = mmu_idx == MMU_USER_IDX;
820
#if defined(DEBUG_MMU)
821
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
822
           addr, is_write1, is_user, env->eip);
823
#endif
824
    is_write = is_write1 & 1;
825

    
826
    if (!(env->cr[0] & CR0_PG_MASK)) {
827
        pte = addr;
828
        virt_addr = addr & TARGET_PAGE_MASK;
829
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
830
        page_size = 4096;
831
        goto do_mapping;
832
    }
833

    
834
    if (env->cr[4] & CR4_PAE_MASK) {
835
        uint64_t pde, pdpe;
836

    
837
        /* XXX: we only use 32 bit physical addresses */
838
#ifdef TARGET_X86_64
839
        if (env->hflags & HF_LMA_MASK) {
840
            uint32_t pml4e_addr;
841
            uint64_t pml4e;
842
            int32_t sext;
843

    
844
            /* test virtual address sign extension */
845
            sext = (int64_t)addr >> 47;
846
            if (sext != 0 && sext != -1) {
847
                env->error_code = 0;
848
                env->exception_index = EXCP0D_GPF;
849
                return 1;
850
            }
851

    
852
            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
853
                env->a20_mask;
854
            pml4e = ldq_phys(pml4e_addr);
855
            if (!(pml4e & PG_PRESENT_MASK)) {
856
                error_code = 0;
857
                goto do_fault;
858
            }
859
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
860
                error_code = PG_ERROR_RSVD_MASK;
861
                goto do_fault;
862
            }
863
            if (!(pml4e & PG_ACCESSED_MASK)) {
864
                pml4e |= PG_ACCESSED_MASK;
865
                stl_phys_notdirty(pml4e_addr, pml4e);
866
            }
867
            ptep = pml4e ^ PG_NX_MASK;
868
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
869
                env->a20_mask;
870
            pdpe = ldq_phys(pdpe_addr);
871
            if (!(pdpe & PG_PRESENT_MASK)) {
872
                error_code = 0;
873
                goto do_fault;
874
            }
875
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
876
                error_code = PG_ERROR_RSVD_MASK;
877
                goto do_fault;
878
            }
879
            ptep &= pdpe ^ PG_NX_MASK;
880
            if (!(pdpe & PG_ACCESSED_MASK)) {
881
                pdpe |= PG_ACCESSED_MASK;
882
                stl_phys_notdirty(pdpe_addr, pdpe);
883
            }
884
        } else
885
#endif
886
        {
887
            /* XXX: load them when cr3 is loaded ? */
888
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
889
                env->a20_mask;
890
            pdpe = ldq_phys(pdpe_addr);
891
            if (!(pdpe & PG_PRESENT_MASK)) {
892
                error_code = 0;
893
                goto do_fault;
894
            }
895
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
896
        }
897

    
898
        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
899
            env->a20_mask;
900
        pde = ldq_phys(pde_addr);
901
        if (!(pde & PG_PRESENT_MASK)) {
902
            error_code = 0;
903
            goto do_fault;
904
        }
905
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
906
            error_code = PG_ERROR_RSVD_MASK;
907
            goto do_fault;
908
        }
909
        ptep &= pde ^ PG_NX_MASK;
910
        if (pde & PG_PSE_MASK) {
911
            /* 2 MB page */
912
            page_size = 2048 * 1024;
913
            ptep ^= PG_NX_MASK;
914
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
915
                goto do_fault_protect;
916
            if (is_user) {
917
                if (!(ptep & PG_USER_MASK))
918
                    goto do_fault_protect;
919
                if (is_write && !(ptep & PG_RW_MASK))
920
                    goto do_fault_protect;
921
            } else {
922
                if ((env->cr[0] & CR0_WP_MASK) &&
923
                    is_write && !(ptep & PG_RW_MASK))
924
                    goto do_fault_protect;
925
            }
926
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
927
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
928
                pde |= PG_ACCESSED_MASK;
929
                if (is_dirty)
930
                    pde |= PG_DIRTY_MASK;
931
                stl_phys_notdirty(pde_addr, pde);
932
            }
933
            /* align to page_size */
934
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
935
            virt_addr = addr & ~(page_size - 1);
936
        } else {
937
            /* 4 KB page */
938
            if (!(pde & PG_ACCESSED_MASK)) {
939
                pde |= PG_ACCESSED_MASK;
940
                stl_phys_notdirty(pde_addr, pde);
941
            }
942
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
943
                env->a20_mask;
944
            pte = ldq_phys(pte_addr);
945
            if (!(pte & PG_PRESENT_MASK)) {
946
                error_code = 0;
947
                goto do_fault;
948
            }
949
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
950
                error_code = PG_ERROR_RSVD_MASK;
951
                goto do_fault;
952
            }
953
            /* combine pde and pte nx, user and rw protections */
954
            ptep &= pte ^ PG_NX_MASK;
955
            ptep ^= PG_NX_MASK;
956
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
957
                goto do_fault_protect;
958
            if (is_user) {
959
                if (!(ptep & PG_USER_MASK))
960
                    goto do_fault_protect;
961
                if (is_write && !(ptep & PG_RW_MASK))
962
                    goto do_fault_protect;
963
            } else {
964
                if ((env->cr[0] & CR0_WP_MASK) &&
965
                    is_write && !(ptep & PG_RW_MASK))
966
                    goto do_fault_protect;
967
            }
968
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
969
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
970
                pte |= PG_ACCESSED_MASK;
971
                if (is_dirty)
972
                    pte |= PG_DIRTY_MASK;
973
                stl_phys_notdirty(pte_addr, pte);
974
            }
975
            page_size = 4096;
976
            virt_addr = addr & ~0xfff;
977
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
978
        }
979
    } else {
980
        uint32_t pde;
981

    
982
        /* page directory entry */
983
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
984
            env->a20_mask;
985
        pde = ldl_phys(pde_addr);
986
        if (!(pde & PG_PRESENT_MASK)) {
987
            error_code = 0;
988
            goto do_fault;
989
        }
990
        /* if PSE bit is set, then we use a 4MB page */
991
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
992
            page_size = 4096 * 1024;
993
            if (is_user) {
994
                if (!(pde & PG_USER_MASK))
995
                    goto do_fault_protect;
996
                if (is_write && !(pde & PG_RW_MASK))
997
                    goto do_fault_protect;
998
            } else {
999
                if ((env->cr[0] & CR0_WP_MASK) &&
1000
                    is_write && !(pde & PG_RW_MASK))
1001
                    goto do_fault_protect;
1002
            }
1003
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1004
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1005
                pde |= PG_ACCESSED_MASK;
1006
                if (is_dirty)
1007
                    pde |= PG_DIRTY_MASK;
1008
                stl_phys_notdirty(pde_addr, pde);
1009
            }
1010

    
1011
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1012
            ptep = pte;
1013
            virt_addr = addr & ~(page_size - 1);
1014
        } else {
1015
            if (!(pde & PG_ACCESSED_MASK)) {
1016
                pde |= PG_ACCESSED_MASK;
1017
                stl_phys_notdirty(pde_addr, pde);
1018
            }
1019

    
1020
            /* page directory entry */
1021
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
1022
                env->a20_mask;
1023
            pte = ldl_phys(pte_addr);
1024
            if (!(pte & PG_PRESENT_MASK)) {
1025
                error_code = 0;
1026
                goto do_fault;
1027
            }
1028
            /* combine pde and pte user and rw protections */
1029
            ptep = pte & pde;
1030
            if (is_user) {
1031
                if (!(ptep & PG_USER_MASK))
1032
                    goto do_fault_protect;
1033
                if (is_write && !(ptep & PG_RW_MASK))
1034
                    goto do_fault_protect;
1035
            } else {
1036
                if ((env->cr[0] & CR0_WP_MASK) &&
1037
                    is_write && !(ptep & PG_RW_MASK))
1038
                    goto do_fault_protect;
1039
            }
1040
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1041
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1042
                pte |= PG_ACCESSED_MASK;
1043
                if (is_dirty)
1044
                    pte |= PG_DIRTY_MASK;
1045
                stl_phys_notdirty(pte_addr, pte);
1046
            }
1047
            page_size = 4096;
1048
            virt_addr = addr & ~0xfff;
1049
        }
1050
    }
1051
    /* the page can be put in the TLB */
1052
    prot = PAGE_READ;
1053
    if (!(ptep & PG_NX_MASK))
1054
        prot |= PAGE_EXEC;
1055
    if (pte & PG_DIRTY_MASK) {
1056
        /* only set write access if already dirty... otherwise wait
1057
           for dirty access */
1058
        if (is_user) {
1059
            if (ptep & PG_RW_MASK)
1060
                prot |= PAGE_WRITE;
1061
        } else {
1062
            if (!(env->cr[0] & CR0_WP_MASK) ||
1063
                (ptep & PG_RW_MASK))
1064
                prot |= PAGE_WRITE;
1065
        }
1066
    }
1067
 do_mapping:
1068
    pte = pte & env->a20_mask;
1069

    
1070
    /* Even if 4MB pages, we map only one 4KB page in the cache to
1071
       avoid filling it too fast */
1072
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1073
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1074
    vaddr = virt_addr + page_offset;
1075

    
1076
    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
1077
    return ret;
1078
 do_fault_protect:
1079
    error_code = PG_ERROR_P_MASK;
1080
 do_fault:
1081
    error_code |= (is_write << PG_ERROR_W_BIT);
1082
    if (is_user)
1083
        error_code |= PG_ERROR_U_MASK;
1084
    if (is_write1 == 2 &&
1085
        (env->efer & MSR_EFER_NXE) &&
1086
        (env->cr[4] & CR4_PAE_MASK))
1087
        error_code |= PG_ERROR_I_D_MASK;
1088
    if (INTERCEPTEDl(_exceptions, 1 << EXCP0E_PAGE)) {
1089
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), addr);
1090
    } else {
1091
        env->cr[2] = addr;
1092
    }
1093
    env->error_code = error_code;
1094
    env->exception_index = EXCP0E_PAGE;
1095
    /* the VMM will handle this */
1096
    if (INTERCEPTEDl(_exceptions, 1 << EXCP0E_PAGE))
1097
        return 2;
1098
    return 1;
1099
}
1100

    
1101
/* Translate a guest virtual address to a guest physical address by
   walking the page tables by hand, without going through the TLB and
   without raising any fault.  Used for debugger/gdbstub accesses.

   Returns the guest physical address, or -1 if 'addr' is not mapped.

   XXX: PAE and long-mode entries are 64 bits wide but only the low
   32 bits are read here (ldl_phys into uint32_t), so physical
   addresses above 4GB are not handled by this debug path.  */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t pde_addr, pte_addr;
    uint32_t pde, pte, paddr, page_offset, page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        uint32_t pdpe_addr, pde_addr, pte_addr;
        uint32_t pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension (bits 63..47 must
               all equal bit 47, i.e. the address must be canonical) */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            /* 32-bit PAE: 4-entry PDPT, selected by bits 31..30 */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldl_phys(pte_addr);
            /* fix: the final PTE was previously used without a present
               check, returning a bogus address for unmapped pages */
            if (!(pte & PG_PRESENT_MASK))
                return -1;
        }
        /* fix: apply the A20 mask in the PAE branch too, matching the
           non-PAE branch and the regular MMU fault path */
        pte = pte & env->a20_mask;
    } else {
        if (!(env->cr[0] & CR0_PG_MASK)) {
            /* paging disabled: physical == virtual */
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                /* 4 MB page */
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
1188
#endif /* !CONFIG_USER_ONLY */
1189

    
1190
#if defined(USE_CODE_COPY)
1191
/* In-memory image produced/consumed by the host "fsave"/"frstor"
   instructions (see save_native_fp_state/restore_native_fp_state).
   Each 16-bit field occupies the low half of a 32-bit slot in that
   format, hence the dummy padding words.  */
struct fpstate {
    uint16_t fpuc;   /* FPU control word */
    uint16_t dummy1;
    uint16_t fpus;   /* FPU status word; TOP is in bits 11-13 */
    uint16_t dummy2;
    uint16_t fptag;  /* tag word: 2 bits per physical register */
    uint16_t dummy3;

    uint32_t fpip;   /* instruction pointer offset */
    uint32_t fpcs;   /* instruction pointer selector */
    uint32_t fpoo;   /* operand pointer offset */
    uint32_t fpos;   /* operand pointer selector */
    uint8_t fpregs1[8 * 10]; /* ST(0)..ST(7), 10 bytes (80 bits) each */
};
1205

    
1206
/* Load the guest FPU state held in 'env' into the host x87 unit.

   env keeps the registers TOP-relative (env->fpregs[env->fpstt] is
   ST(0)), while frstor expects physical register order, so the stack
   top is folded back into the status word and the registers are
   rotated accordingly before the load.  */
void restore_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    /* re-insert the stack top (3 bits) into bits 11-13 of the status word */
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3; /* tag 3: register empty */
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    /* rotate the TOP-relative env registers into physical order */
    j = env->fpstt;
    for (i = 0; i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    /* fix: frstor *reads* the memory image, so *fp must be an input
       operand.  The previous "=m" output constraint told the compiler
       the asm only writes *fp, allowing it to discard all the stores
       into fp1 above as dead code.  */
    asm volatile ("frstor %0" : : "m" (*fp));
    env->native_fp_regs = 1;
}
1231

    
1232
/* Dump the host x87 unit into 'env' (counterpart of
   restore_native_fp_state).  The physical-order register image written
   by fsave is rotated back into env's TOP-relative layout.  fsave also
   reinitializes the FPU, so the rounding/precision control bits are
   reloaded afterwards.  */
void save_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    /* fix: fsave *writes* the memory image, so *fp must be an output
       operand ("=m").  The previous plain "m" input constraint let the
       compiler assume fp1 was read uninitialized and never modified by
       the asm.  */
    asm volatile ("fsave %0" : "=m" (*fp));
    env->fpuc = fp->fpuc;
    /* extract the stack top from bits 11-13 of the status word */
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3); /* tag 3 == empty */
        fptag >>= 2;
    }
    /* rotate physical-order registers back to TOP-relative order */
    j = env->fpstt;
    for (i = 0; i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    /* XXX: we do not restore the exception state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
    env->native_fp_regs = 0;
}
1258
#endif