/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"
#include "svm.h"
#include "qemu-common.h"

//#define DEBUG_MMU

static int cpu_x86_register (CPUX86State *env, const char *cpu_model);

static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    int i;
    /* feature flags taken from "Intel Processor Identification and the CPUID
     * Instruction" and AMD's "CPUID Specification".  In cases of disagreement
     * about feature names, the Linux name is used. */
    static const char *feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
    };
    static const char *ext_feature_name[] = {
       "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
       "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
       NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
       NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    static const char *ext2_feature_name[] = {
       "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
       "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
       "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
       "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
    };
    static const char *ext3_feature_name[] = {
       "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
       "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
       NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
       NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };

    for (i = 0; i < 32; i++)
        if (feature_name[i] && !strcmp(flagname, feature_name[i])) {
            *features |= 1 << i;
            return;
        }
    for (i = 0; i < 32; i++)
        if (ext_feature_name[i] && !strcmp(flagname, ext_feature_name[i])) {
            *ext_features |= 1 << i;
            return;
        }
    for (i = 0; i < 32; i++)
        if (ext2_feature_name[i] && !strcmp(flagname, ext2_feature_name[i])) {
            *ext2_features |= 1 << i;
            return;
        }
    for (i = 0; i < 32; i++)
        if (ext3_feature_name[i] && !strcmp(flagname, ext3_feature_name[i])) {
            *ext3_features |= 1 << i;
            return;
        }
    fprintf(stderr, "CPU feature %s not found\n", flagname);
}
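/*
 * Illustrative note (not part of the original source): a call such as
 *     add_flagname_to_bitmaps("sse2", &feat, &ext, &ext2, &ext3);
 * sets bit 26 of 'feat', because "sse2" is entry 26 of feature_name[] and the
 * table indices mirror the bit positions of CPUID leaf 1 EDX.
 */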

CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    return env;
}
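/*
 * Illustrative usage (not part of the original source): machine init code is
 * expected to call something like
 *     CPUX86State *env = cpu_x86_init("qemu32");
 * and receives a freshly reset CPU, or NULL if the model string is unknown.
 */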

typedef struct x86_def_t {
    const char *name;
    uint32_t level;
    uint32_t vendor1, vendor2, vendor3;
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;
    char model_id[48];
} x86_def_t;

#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 2,
        .vendor1 = 0x68747541, /* "Auth" */
        .vendor2 = 0x69746e65, /* "enti" */
        .vendor3 = 0x444d4163, /* "cAMD" */
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
#endif
    {
        .name = "qemu32",
        .level = 2,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3,
        .xlevel = 0,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = 0x68747541, /* "Auth" */
        .vendor2 = 0x69746e65, /* "enti" */
        .vendor3 = 0x444d4163, /* "cAMD" */
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        /* XXX: put another string ? */
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
};
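/*
 * Note (illustrative, not part of the original source): the vendor fields hold
 * the 12-byte CPUID vendor string as three little-endian 32-bit words, e.g.
 * for "AuthenticAMD":
 *     'A' | 'u' << 8 | 't' << 16 | 'h' << 24 == 0x68747541   ("Auth")
 * which the guest reads back from CPUID leaf 0 in EBX, EDX and ECX.
 */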

static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    char *s = strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
    int family = -1, model = -1, stepping = -1;

    def = NULL;
    for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++) {
        if (strcmp(name, x86_defs[i].name) == 0) {
            def = &x86_defs[i];
            break;
        }
    }
    if (!def)
        goto error;
    memcpy(x86_cpu_def, def, sizeof(*def));

    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0; val++;
            if (!strcmp(featurestr, "family")) {
                char *err;
                family = strtol(val, &err, 10);
                if (!*val || *err || family < 0) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = family;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                model = strtol(val, &err, 10);
                if (!*val || *err || model < 0 || model > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = model;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                stepping = strtol(val, &err, 10);
                if (!*val || *err || stepping < 0 || stepping > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = stepping;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
                for(i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i    ]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    free(s);
    return 0;

error:
    free(s);
    return -1;
}
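/*
 * Illustrative example (not part of the original source): a model string such
 * as
 *     "pentium3,+sse2,-mmx,family=6,model_id=My CPU"
 * selects the "pentium3" definition, ORs the "sse2" bit into features, clears
 * the "mmx" bit, overrides the family, and replaces the model_id string.
 */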

void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    unsigned int i;

    for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++)
        (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
}

static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
        env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
        env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
    }
    env->cpuid_level = def->level;
    env->cpuid_version = (def->family << 8) | (def->model << 4) | def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
    {
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
    return 0;
}
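/*
 * Worked example (illustrative, not part of the original source): with
 * family=6, model=2, stepping=3 the packing above yields
 *     cpuid_version = (6 << 8) | (2 << 4) | 3 = 0x623,
 * the value a guest later reads in EAX from CPUID leaf 1 (and in EDX at reset).
 */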

/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0; i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;
}
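/*
 * Note (illustrative, not part of the original source): with CS.base set to
 * 0xffff0000 and EIP set to 0xfff0 above, the first instruction is fetched
 * from physical address 0xfffffff0, the standard x86 reset vector below 4GB.
 */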

void cpu_x86_close(CPUX86State *env)
{
    free(env);
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i = 0; i < 8; i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i = 0; i < nb; i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}
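/*
 * Note (illustrative, not part of the original source): the FTW value printed
 * above is the abridged tag word: bit i is set when env->fptags[i] == 0, i.e.
 * when ST(i) holds a valid value, matching the FXSAVE abridged-tag convention.
 */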

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = (~0x100000) | (a20_state << 20);
    }
}
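/*
 * Example (illustrative, not part of the original source): with a20_state == 0
 * the assignment above yields a20_mask == ~0x100000, so bit 20 is cleared from
 * every translated physical address and accesses wrap at 1MB, as on real
 * hardware with the A20 gate disabled.
 */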

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
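/*
 * Note (illustrative, not part of the original source): the final statement
 * copies CR0.MP/EM/TS (bits 1-3) into the HF_MP/HF_EM/HF_TS hflag bits with a
 * single shift; this relies on those three hflag bits being laid out
 * contiguously starting at HF_MP_SHIFT.
 */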

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}

/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env, addr);
}

    
806
#if defined(CONFIG_USER_ONLY)
807

    
808
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
809
                             int is_write, int mmu_idx, int is_softmmu)
810
{
811
    /* user mode only emulation */
812
    is_write &= 1;
813
    env->cr[2] = addr;
814
    env->error_code = (is_write << PG_ERROR_W_BIT);
815
    env->error_code |= PG_ERROR_U_MASK;
816
    env->exception_index = EXCP0E_PAGE;
817
    return 1;
818
}
819

    
820
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
821
{
822
    return addr;
823
}
824

    
825
#else
826

    
827
/* XXX: This value should match the one returned by CPUID
828
 * and in exec.c */
829
#if defined(USE_KQEMU)
830
#define PHYS_ADDR_MASK 0xfffff000LL
831
#else
832
# if defined(TARGET_X86_64)
833
# define PHYS_ADDR_MASK 0xfffffff000LL
834
# else
835
# define PHYS_ADDR_MASK 0xffffff000LL
836
# endif
837
#endif

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
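/*
 * Worked example (illustrative, not part of the original source): in non-PAE
 * mode a linear address A is split into A[31:22] (page-directory index) and
 * A[21:12] (page-table index). The expression ((addr >> 20) & 0xffc) above is
 * simply A[31:22] * 4, i.e. the byte offset of the 4-byte PDE within the page
 * directory, and ((addr >> 10) & 0xffc) is A[21:12] * 4 within the page table.
 */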

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif /* !CONFIG_USER_ONLY */