Statistics
| Branch: | Revision:

root / target-i386 / helper2.c @ a35f3ec7

History | View | Annotate | Download (38.5 kB)

1
/*
2
 *  i386 helpers (without register variable usage)
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include <stdarg.h>
21
#include <stdlib.h>
22
#include <stdio.h>
23
#include <string.h>
24
#include <inttypes.h>
25
#include <signal.h>
26
#include <assert.h>
27

    
28
#include "cpu.h"
29
#include "exec-all.h"
30
#include "svm.h"
31

    
32
//#define DEBUG_MMU
33

    
34
static int cpu_x86_register (CPUX86State *env, const char *cpu_model);
35

    
36
/* Scan one 32-entry CPUID feature-name table for flagname; on a hit set
   the corresponding bit in *bits and return 1, otherwise return 0.
   NULL table entries are reserved/unnamed bits and never match. */
static int lookup_feature(const char *flagname, const char **table,
                          uint32_t *bits)
{
    int i;

    for (i = 0; i < 32; i++) {
        if (table[i] && !strcmp(flagname, table[i])) {
            /* use an unsigned constant so bit 31 does not overflow int */
            *bits |= 1u << i;
            return 1;
        }
    }
    return 0;
}

/* Translate a CPUID feature flag name (as used with "-cpu model,+flag")
   into the matching bit of one of the four feature bitmaps:
   features (CPUID.1 EDX), ext_features (CPUID.1 ECX),
   ext2_features (CPUID.80000001 EDX), ext3_features (CPUID.80000001 ECX).
   Unknown names are reported on stderr and otherwise ignored. */
static void add_flagname_to_bitmaps(char *flagname, uint32_t *features, 
                                    uint32_t *ext_features, 
                                    uint32_t *ext2_features, 
                                    uint32_t *ext3_features)
{
    /* feature flags taken from "Intel Processor Identification and the CPUID
     * Instruction" and AMD's "CPUID Specification". In cases of disagreement 
     * about feature names, the Linux name is used. */
    const char *feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
    };
    const char *ext_feature_name[] = {
       "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
       "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
       NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
       NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    const char *ext2_feature_name[] = {
       "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
       /* bug fix: was misspelled "mttr" and therefore never matched */
       "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
       "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
       "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
    };
    const char *ext3_feature_name[] = {
       "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
       "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
       NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
       NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };

    /* the tables are searched in CPUID-leaf order; the first match wins */
    if (lookup_feature(flagname, feature_name, features))
        return;
    if (lookup_feature(flagname, ext_feature_name, ext_features))
        return;
    if (lookup_feature(flagname, ext2_feature_name, ext2_features))
        return;
    if (lookup_feature(flagname, ext3_feature_name, ext3_features))
        return;
    fprintf(stderr, "CPU feature %s not found\n", flagname);
}
92

    
93
CPUX86State *cpu_x86_init(const char *cpu_model)
94
{
95
    CPUX86State *env;
96
    static int inited;
97

    
98
    env = qemu_mallocz(sizeof(CPUX86State));
99
    if (!env)
100
        return NULL;
101
    cpu_exec_init(env);
102
    env->cpu_model_str = cpu_model;
103

    
104
    /* init various static tables */
105
    if (!inited) {
106
        inited = 1;
107
        optimize_flags_init();
108
    }
109
    if (cpu_x86_register(env, cpu_model) < 0) {
110
        cpu_x86_close(env);
111
        return NULL;
112
    }
113
    cpu_reset(env);
114
#ifdef USE_KQEMU
115
    kqemu_init(env);
116
#endif
117
    return env;
118
}
119

    
120
/* Static description of one built-in CPU model (see x86_defs below).
   cpu_x86_register() copies these fields into the live CPUX86State. */
typedef struct x86_def_t {
    const char *name;                    /* model name matched against "-cpu" */
    uint32_t level;                      /* reported CPUID level (cpuid_level) */
    uint32_t vendor1, vendor2, vendor3;  /* vendor string words; all-zero means
                                            "use the default GenuineIntel" */
    int family;                          /* packed into cpuid_version as */
    int model;                           /* (family << 8) | (model << 4) */
    int stepping;                        /*   | stepping                  */
    uint32_t features, ext_features, ext2_features, ext3_features;
                                         /* the four CPUID feature bitmaps */
    uint32_t xlevel;                     /* reported extended CPUID level
                                            (cpuid_xlevel), e.g. 0x8000000A */
} x86_def_t;
130

    
131
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
132
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
133
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
134
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
135
static x86_def_t x86_defs[] = {
136
#ifdef TARGET_X86_64
137
    {
138
        .name = "qemu64",
139
        .level = 2,
140
        .vendor1 = 0x68747541, /* "Auth" */
141
        .vendor2 = 0x69746e65, /* "enti" */
142
        .vendor3 = 0x444d4163, /* "cAMD" */
143
        .family = 6,
144
        .model = 2,
145
        .stepping = 3,
146
        .features = PPRO_FEATURES | 
147
        /* these features are needed for Win64 and aren't fully implemented */
148
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
149
        /* this feature is needed for Solaris and isn't fully implemented */
150
            CPUID_PSE36,
151
        .ext_features = CPUID_EXT_SSE3,
152
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | 
153
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
154
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
155
        .ext3_features = CPUID_EXT3_SVM,
156
        .xlevel = 0x8000000A,
157
    },
158
#endif
159
    {
160
        .name = "qemu32",
161
        .level = 2,
162
        .family = 6,
163
        .model = 3,
164
        .stepping = 3,
165
        .features = PPRO_FEATURES,
166
        .ext_features = CPUID_EXT_SSE3,
167
        .xlevel = 0,
168
    },
169
    {
170
        .name = "486",
171
        .level = 0,
172
        .family = 4,
173
        .model = 0,
174
        .stepping = 0,
175
        .features = 0x0000000B,
176
        .xlevel = 0,
177
    },
178
    {
179
        .name = "pentium",
180
        .level = 1,
181
        .family = 5,
182
        .model = 4,
183
        .stepping = 3,
184
        .features = 0x008001BF,
185
        .xlevel = 0,
186
    },
187
    {
188
        .name = "pentium2",
189
        .level = 2,
190
        .family = 6,
191
        .model = 5,
192
        .stepping = 2,
193
        .features = 0x0183F9FF,
194
        .xlevel = 0,
195
    },
196
    {
197
        .name = "pentium3",
198
        .level = 2,
199
        .family = 6,
200
        .model = 7,
201
        .stepping = 3,
202
        .features = 0x0383F9FF,
203
        .xlevel = 0,
204
    },
205
    {
206
        .name = "athlon",
207
        .level = 2,
208
        .vendor1 = 0x68747541, /* "Auth" */
209
        .vendor2 = 0x69746e65, /* "enti" */
210
        .vendor3 = 0x444d4163, /* "cAMD" */
211
        .family = 6,
212
        .model = 2,
213
        .stepping = 3,
214
        .features = PPRO_FEATURES | PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
215
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
216
        .xlevel = 0x80000008,
217
    },
218
};
219

    
220
/* Resolve a "-cpu" style model string of the form
 *    name[,+feature][,-feature][,property=value]...
 * into *x86_cpu_def.  The named base model is copied from x86_defs and
 * then the explicit +/- feature bits and family/model/stepping
 * properties are applied on top.  Returns 0 on success, -1 on any
 * error (unknown model, unknown feature, malformed property); on error
 * *x86_cpu_def may have been partially written.
 */
static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    char *s = strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
    int family = -1, model = -1, stepping = -1;

    /* an empty model string leaves name == NULL; reject it here rather
       than passing NULL to strcmp() below */
    if (!name)
        goto error;

    def = NULL;
    for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++) {
        if (strcmp(name, x86_defs[i].name) == 0) {
            def = &x86_defs[i];
            break;
        }
    }
    if (!def)
        goto error;
    memcpy(x86_cpu_def, def, sizeof(*def));

    featurestr = strtok(NULL, ",");

    /* note: the dead "x86_cpu_def = 0;" stores that preceded each goto
       in the original were removed -- assigning to the by-value pointer
       parameter had no effect on the caller */
    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            /* "+flag": force a feature bit on */
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
        } else if (featurestr[0] == '-') {
            /* "-flag": force a feature bit off */
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0; val++;
            if (!strcmp(featurestr, "family")) {
                char *err;
                family = strtol(val, &err, 10);
                if (!*val || *err || family < 0) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = family;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                /* model and stepping are 4-bit fields in cpuid_version */
                model = strtol(val, &err, 10);
                if (!*val || *err || model < 0 || model > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = model;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                stepping = strtol(val, &err, 10);
                if (!*val || *err || stepping < 0 || stepping > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = stepping;
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    /* apply the explicit overrides on top of the base definition:
       "+" bits win over the base, then "-" bits win over everything */
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    free(s);
    return 0;

error:
    free(s);
    return -1;
}
306

    
307
void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
308
{
309
    unsigned int i;
310

    
311
    for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++)
312
        (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
313
}
314

    
315
/* Resolve cpu_model and load the resulting definition into env's CPUID
   state.  Returns 0 on success, -1 if the model string is invalid. */
static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;

    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        /* definitions without an explicit vendor report GenuineIntel */
        env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
        env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
        env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
    }

    env->cpuid_level = def->level;
    env->cpuid_version = (def->family << 8) | (def->model << 4) | def->stepping;
    env->cpuid_features = def->features;
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_ext3_features = def->ext3_features;
    env->cpuid_xlevel = def->xlevel;
    env->pat = 0x0007040600070406ULL;

    /* pack the brand string into the CPUID model words, four characters
       per 32-bit word, NUL padded out to the full 48 bytes */
    {
        const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION;
        int len = strlen(model_id);
        int i;

        for (i = 0; i < 48; i++) {
            int c = (i < len) ? model_id[i] : '\0';
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
    return 0;
}
352

    
353
/* NOTE: must be called outside the CPU execute loop */
/* Put env back into the x86 power-on/reset state: real mode, paging off,
   segment caches loaded with the reset values, FPU/SSE initialized. */
void cpu_reset(CPUX86State *env)
{
    int i;

    /* zero everything up to (but not including) the breakpoints field;
       fields from breakpoints onward survive a reset */
    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    /* presumably the SVM global interrupt flag (svm.h is included) --
       starts enabled; NOTE(review): confirm against svm.h */
    env->hflags |= HF_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = 0xffffffff;  /* A20 gate open: no address masking */
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK;
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK;

    /* real-mode segments; CS gets base 0xffff0000 so that with
       EIP = 0xfff0 below execution starts at the reset vector */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);

    env->eip = 0xfff0;
    /* EDX holds the family/model/stepping word at reset */
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;  /* only the always-set reserved bit 1 */

    /* FPU init */
    /* NOTE(review): fptags appear to use 1 = "register empty"
       (cpu_dump_state inverts them when building FTW) -- confirm */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;
}
401

    
402
/* Release a CPU state previously created by cpu_x86_init().
   NOTE(review): env was allocated with qemu_mallocz() but is released
   with plain free() -- assumes qemu_mallocz wraps malloc; confirm. */
void cpu_x86_close(CPUX86State *env)
{
    free(env);
}
406

    
407
/***********************************************************/
408
/* x86 debug */
409

    
410
/* Printable names for the lazy condition-code computation states.
   Indexed directly by env->cc_op (see the cc_op_str[env->cc_op] lookup
   in cpu_dump_state), so the order here must match the CC_OP
   enumeration exactly.  After the two special entries, each group of
   four covers the B/W/L/Q operand sizes of one operation. */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
464

    
465
/* Dump the full CPU register state to f through the fprintf-style
   callback: general registers, eflags, segment caches, control
   registers, and -- when requested via flags -- the lazy condition-code
   state (X86_DUMP_CCOP) and the FPU/SSE registers (X86_DUMP_FPU).
   64-bit formatting is used when the CPU is in 64-bit code (CS64) or
   long mode (LMA) respectively. */
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        /* 64-bit code segment: print the full R8-R15 set as well */
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    }

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        /* long mode: segment bases and CR2/CR3 are 64-bit values */
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        /* print the symbolic name for in-range cc_op values, the raw
           number otherwise */
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        /* build the abridged tag word: one bit per register, inverted
           from fptags */
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        /* FSW is reconstructed with the current TOP folded into
           bits 11-13 */
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            /* split the 80-bit long double into its 64-bit mantissa and
               16-bit sign/exponent parts for printing */
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            /* two FP registers per output line */
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        /* XMM8-XMM15 only exist in 64-bit code */
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}
671

    
672
/***********************************************************/
673
/* x86 mmu */
674
/* XXX: add PGE support */
675

    
676
/* Set the emulated A20 gate state (0 = masked, non-zero = open).
   A change invalidates all cached translations and interrupts any
   currently-executing translated code. */
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    int new_state = (a20_state != 0);
    int cur_state = (env->a20_mask >> 20) & 1;

    if (new_state == cur_state)
        return;

#if defined(DEBUG_MMU)
    printf("A20 update: a20=%d\n", new_state);
#endif
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

    /* when a20 is changed, all the MMU mappings are invalid, so
       we must flush everything */
    tlb_flush(env, 1);
    env->a20_mask = 0xffefffff | (new_state << 20);
}
693

    
694
/* Write a new CR0 value, keeping the derived state consistent:
   TLB, long-mode (LMA) entry/exit, and the PE/ADDSEG/MP/EM/TS hflags. */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* any change to PG, WP or PE invalidates cached translations */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        /* paging enabled with EFER.LME set activates long mode, but
           only if PAE is on; otherwise the write is silently dropped
           (the early return skips the env->cr[0] update below) */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    /* ET is always forced on */
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    /* NOTE(review): this assumes CR0_PE_MASK is bit 0, so pe_state is
       0 or 1 and can be shifted directly -- confirm against cpu.h */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    /* NOTE(review): relies on HF_MP/EM/TS being the CR0 MP/EM/TS bits
       shifted by exactly (HF_MP_SHIFT - 1) -- confirm against cpu.h */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
734

    
735
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
736
   the PDPT */
737
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
738
{
739
    env->cr[3] = new_cr3;
740
    if (env->cr[0] & CR0_PG_MASK) {
741
#if defined(DEBUG_MMU)
742
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
743
#endif
744
        tlb_flush(env, 0);
745
    }
746
}
747

    
748
/* Write a new CR4 value, flushing the TLB when translation-relevant
   bits change and mirroring OSFXSR into the hflags. */
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    /* bug fix: log the value being written; the old code printed the
       stale env->cr[4] under the "CR4 update" label */
    printf("CR4 update: CR4=%08x\n", new_cr4);
#endif
    /* PGE, PAE and PSE all change how pages are translated */
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling: OSFXSR cannot be set unless SSE is reported */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}
767

    
768
/* XXX: also flush 4MB pages */
/* Invalidate the cached translation of the single page containing addr. */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env, addr);
}
773

    
774
#if defined(CONFIG_USER_ONLY)
775

    
776
/* user mode only emulation: there is no MMU, so every fault is turned
   directly into a guest #PF (return value 1 = raise page fault) */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* only the low bit of is_write is the write flag */
    int write_fault = is_write & 1;

    env->cr[2] = addr;
    /* user-mode access, with the write bit reflecting the access type */
    env->error_code = (write_fault << PG_ERROR_W_BIT) | PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
787

    
788
/* user-mode emulation: guest virtual addresses map 1:1 to "physical"
   addresses, so the debug translation is the identity */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
792

    
793
#else
794

    
795
#define PHYS_ADDR_MASK 0xfffff000
796

    
797
/* return value:
798
   -1 = cannot handle fault
799
   0  = nothing more to do
800
   1  = generate PF fault
801
   2  = soft MMU activation required for this block
802
*/
803
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
804
                             int is_write1, int mmu_idx, int is_softmmu)
805
{
806
    uint64_t ptep, pte;
807
    uint32_t pdpe_addr, pde_addr, pte_addr;
808
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
809
    unsigned long paddr, page_offset;
810
    target_ulong vaddr, virt_addr;
811

    
812
    is_user = mmu_idx == MMU_USER_IDX;
813
#if defined(DEBUG_MMU)
814
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
815
           addr, is_write1, is_user, env->eip);
816
#endif
817
    is_write = is_write1 & 1;
818

    
819
    if (!(env->cr[0] & CR0_PG_MASK)) {
820
        pte = addr;
821
        virt_addr = addr & TARGET_PAGE_MASK;
822
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
823
        page_size = 4096;
824
        goto do_mapping;
825
    }
826

    
827
    if (env->cr[4] & CR4_PAE_MASK) {
828
        uint64_t pde, pdpe;
829

    
830
        /* XXX: we only use 32 bit physical addresses */
831
#ifdef TARGET_X86_64
832
        if (env->hflags & HF_LMA_MASK) {
833
            uint32_t pml4e_addr;
834
            uint64_t pml4e;
835
            int32_t sext;
836

    
837
            /* test virtual address sign extension */
838
            sext = (int64_t)addr >> 47;
839
            if (sext != 0 && sext != -1) {
840
                env->error_code = 0;
841
                env->exception_index = EXCP0D_GPF;
842
                return 1;
843
            }
844

    
845
            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
846
                env->a20_mask;
847
            pml4e = ldq_phys(pml4e_addr);
848
            if (!(pml4e & PG_PRESENT_MASK)) {
849
                error_code = 0;
850
                goto do_fault;
851
            }
852
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
853
                error_code = PG_ERROR_RSVD_MASK;
854
                goto do_fault;
855
            }
856
            if (!(pml4e & PG_ACCESSED_MASK)) {
857
                pml4e |= PG_ACCESSED_MASK;
858
                stl_phys_notdirty(pml4e_addr, pml4e);
859
            }
860
            ptep = pml4e ^ PG_NX_MASK;
861
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
862
                env->a20_mask;
863
            pdpe = ldq_phys(pdpe_addr);
864
            if (!(pdpe & PG_PRESENT_MASK)) {
865
                error_code = 0;
866
                goto do_fault;
867
            }
868
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
869
                error_code = PG_ERROR_RSVD_MASK;
870
                goto do_fault;
871
            }
872
            ptep &= pdpe ^ PG_NX_MASK;
873
            if (!(pdpe & PG_ACCESSED_MASK)) {
874
                pdpe |= PG_ACCESSED_MASK;
875
                stl_phys_notdirty(pdpe_addr, pdpe);
876
            }
877
        } else
878
#endif
879
        {
880
            /* XXX: load them when cr3 is loaded ? */
881
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
882
                env->a20_mask;
883
            pdpe = ldq_phys(pdpe_addr);
884
            if (!(pdpe & PG_PRESENT_MASK)) {
885
                error_code = 0;
886
                goto do_fault;
887
            }
888
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
889
        }
890

    
891
        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
892
            env->a20_mask;
893
        pde = ldq_phys(pde_addr);
894
        if (!(pde & PG_PRESENT_MASK)) {
895
            error_code = 0;
896
            goto do_fault;
897
        }
898
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
899
            error_code = PG_ERROR_RSVD_MASK;
900
            goto do_fault;
901
        }
902
        ptep &= pde ^ PG_NX_MASK;
903
        if (pde & PG_PSE_MASK) {
904
            /* 2 MB page */
905
            page_size = 2048 * 1024;
906
            ptep ^= PG_NX_MASK;
907
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
908
                goto do_fault_protect;
909
            if (is_user) {
910
                if (!(ptep & PG_USER_MASK))
911
                    goto do_fault_protect;
912
                if (is_write && !(ptep & PG_RW_MASK))
913
                    goto do_fault_protect;
914
            } else {
915
                if ((env->cr[0] & CR0_WP_MASK) &&
916
                    is_write && !(ptep & PG_RW_MASK))
917
                    goto do_fault_protect;
918
            }
919
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
920
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
921
                pde |= PG_ACCESSED_MASK;
922
                if (is_dirty)
923
                    pde |= PG_DIRTY_MASK;
924
                stl_phys_notdirty(pde_addr, pde);
925
            }
926
            /* align to page_size */
927
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
928
            virt_addr = addr & ~(page_size - 1);
929
        } else {
930
            /* 4 KB page */
931
            if (!(pde & PG_ACCESSED_MASK)) {
932
                pde |= PG_ACCESSED_MASK;
933
                stl_phys_notdirty(pde_addr, pde);
934
            }
935
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
936
                env->a20_mask;
937
            pte = ldq_phys(pte_addr);
938
            if (!(pte & PG_PRESENT_MASK)) {
939
                error_code = 0;
940
                goto do_fault;
941
            }
942
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
943
                error_code = PG_ERROR_RSVD_MASK;
944
                goto do_fault;
945
            }
946
            /* combine pde and pte nx, user and rw protections */
947
            ptep &= pte ^ PG_NX_MASK;
948
            ptep ^= PG_NX_MASK;
949
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
950
                goto do_fault_protect;
951
            if (is_user) {
952
                if (!(ptep & PG_USER_MASK))
953
                    goto do_fault_protect;
954
                if (is_write && !(ptep & PG_RW_MASK))
955
                    goto do_fault_protect;
956
            } else {
957
                if ((env->cr[0] & CR0_WP_MASK) &&
958
                    is_write && !(ptep & PG_RW_MASK))
959
                    goto do_fault_protect;
960
            }
961
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
962
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
963
                pte |= PG_ACCESSED_MASK;
964
                if (is_dirty)
965
                    pte |= PG_DIRTY_MASK;
966
                stl_phys_notdirty(pte_addr, pte);
967
            }
968
            page_size = 4096;
969
            virt_addr = addr & ~0xfff;
970
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
971
        }
972
    } else {
973
        uint32_t pde;
974

    
975
        /* page directory entry */
976
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
977
            env->a20_mask;
978
        pde = ldl_phys(pde_addr);
979
        if (!(pde & PG_PRESENT_MASK)) {
980
            error_code = 0;
981
            goto do_fault;
982
        }
983
        /* if PSE bit is set, then we use a 4MB page */
984
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
985
            page_size = 4096 * 1024;
986
            if (is_user) {
987
                if (!(pde & PG_USER_MASK))
988
                    goto do_fault_protect;
989
                if (is_write && !(pde & PG_RW_MASK))
990
                    goto do_fault_protect;
991
            } else {
992
                if ((env->cr[0] & CR0_WP_MASK) &&
993
                    is_write && !(pde & PG_RW_MASK))
994
                    goto do_fault_protect;
995
            }
996
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
997
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
998
                pde |= PG_ACCESSED_MASK;
999
                if (is_dirty)
1000
                    pde |= PG_DIRTY_MASK;
1001
                stl_phys_notdirty(pde_addr, pde);
1002
            }
1003

    
1004
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1005
            ptep = pte;
1006
            virt_addr = addr & ~(page_size - 1);
1007
        } else {
1008
            if (!(pde & PG_ACCESSED_MASK)) {
1009
                pde |= PG_ACCESSED_MASK;
1010
                stl_phys_notdirty(pde_addr, pde);
1011
            }
1012

    
1013
            /* page directory entry */
1014
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
1015
                env->a20_mask;
1016
            pte = ldl_phys(pte_addr);
1017
            if (!(pte & PG_PRESENT_MASK)) {
1018
                error_code = 0;
1019
                goto do_fault;
1020
            }
1021
            /* combine pde and pte user and rw protections */
1022
            ptep = pte & pde;
1023
            if (is_user) {
1024
                if (!(ptep & PG_USER_MASK))
1025
                    goto do_fault_protect;
1026
                if (is_write && !(ptep & PG_RW_MASK))
1027
                    goto do_fault_protect;
1028
            } else {
1029
                if ((env->cr[0] & CR0_WP_MASK) &&
1030
                    is_write && !(ptep & PG_RW_MASK))
1031
                    goto do_fault_protect;
1032
            }
1033
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1034
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1035
                pte |= PG_ACCESSED_MASK;
1036
                if (is_dirty)
1037
                    pte |= PG_DIRTY_MASK;
1038
                stl_phys_notdirty(pte_addr, pte);
1039
            }
1040
            page_size = 4096;
1041
            virt_addr = addr & ~0xfff;
1042
        }
1043
    }
1044
    /* the page can be put in the TLB */
1045
    prot = PAGE_READ;
1046
    if (!(ptep & PG_NX_MASK))
1047
        prot |= PAGE_EXEC;
1048
    if (pte & PG_DIRTY_MASK) {
1049
        /* only set write access if already dirty... otherwise wait
1050
           for dirty access */
1051
        if (is_user) {
1052
            if (ptep & PG_RW_MASK)
1053
                prot |= PAGE_WRITE;
1054
        } else {
1055
            if (!(env->cr[0] & CR0_WP_MASK) ||
1056
                (ptep & PG_RW_MASK))
1057
                prot |= PAGE_WRITE;
1058
        }
1059
    }
1060
 do_mapping:
1061
    pte = pte & env->a20_mask;
1062

    
1063
    /* Even if 4MB pages, we map only one 4KB page in the cache to
1064
       avoid filling it too fast */
1065
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1066
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1067
    vaddr = virt_addr + page_offset;
1068

    
1069
    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
1070
    return ret;
1071
 do_fault_protect:
1072
    error_code = PG_ERROR_P_MASK;
1073
 do_fault:
1074
    error_code |= (is_write << PG_ERROR_W_BIT);
1075
    if (is_user)
1076
        error_code |= PG_ERROR_U_MASK;
1077
    if (is_write1 == 2 &&
1078
        (env->efer & MSR_EFER_NXE) &&
1079
        (env->cr[4] & CR4_PAE_MASK))
1080
        error_code |= PG_ERROR_I_D_MASK;
1081
    if (INTERCEPTEDl(_exceptions, 1 << EXCP0E_PAGE)) {
1082
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), addr);
1083
    } else {
1084
        env->cr[2] = addr;
1085
    }
1086
    env->error_code = error_code;
1087
    env->exception_index = EXCP0E_PAGE;
1088
    /* the VMM will handle this */
1089
    if (INTERCEPTEDl(_exceptions, 1 << EXCP0E_PAGE))
1090
        return 2;
1091
    return 1;
1092
}
1093

    
1094
/* Translate a guest virtual address to a guest physical address by
   walking the page tables, for debugger use (gdbstub, monitor).
   Unlike cpu_x86_handle_mmu_fault() this never raises exceptions,
   never checks access rights and never sets accessed/dirty bits.
   Returns the physical address, or -1 if the page is not present. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t pde_addr, pte_addr;
    uint32_t pde, paddr, page_offset, page_size;
    uint64_t pte;

    if (env->cr[4] & CR4_PAE_MASK) {
        uint32_t pdpe_addr, pde_addr, pte_addr;
        uint64_t pdpe, pde64;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr;
            uint64_t pml4e;
            int32_t sext;

            /* test virtual address sign extension (canonical form) */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            /* PML4 entries are 8 bytes wide: load them with ldq_phys,
               as cpu_x86_handle_mmu_fault() does */
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            /* legacy PAE: 4-entry PDPT selected by bits 31:30 */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde64 = ldq_phys(pde_addr);
        if (!(pde64 & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde64 & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde64 & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde64 & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
    } else {
        if (!(env->cr[0] & CR0_PG_MASK)) {
            /* paging disabled: physical == virtual */
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
1181
#endif /* !CONFIG_USER_ONLY */