/* target-i386/helper.c @ 558fa836 */
/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"
#include "svm.h"
#include "qemu-common.h"

//#define DEBUG_MMU

static int cpu_x86_register (CPUX86State *env, const char *cpu_model);

static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    int i;
    /* feature flags taken from "Intel Processor Identification and the CPUID
     * Instruction" and AMD's "CPUID Specification". In cases of disagreement
     * about feature names, the Linux name is used. */
    static const char *feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
    };
    static const char *ext_feature_name[] = {
       "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
       "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
       NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
       NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    static const char *ext2_feature_name[] = {
       "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
       "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
       "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
       "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
    };
    static const char *ext3_feature_name[] = {
       "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
       "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
       NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
       NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };

    for ( i = 0 ; i < 32 ; i++ )
        if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
            *features |= 1 << i;
            return;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
            *ext_features |= 1 << i;
            return;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
            *ext2_features |= 1 << i;
            return;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
            *ext3_features |= 1 << i;
            return;
        }
    fprintf(stderr, "CPU feature %s not found\n", flagname);
}
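
/* Usage example (illustration only, with caller-owned bitmap words f, ef,
 * e2f, e3f): add_flagname_to_bitmaps("sse2", &f, &ef, &e2f, &e3f) matches
 * index 26 of feature_name[] and therefore sets bit 26 (CPUID_SSE2) in f;
 * a name found in none of the four tables only prints the warning above. */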

CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    return env;
}

typedef struct x86_def_t {
    const char *name;
    uint32_t level;
    uint32_t vendor1, vendor2, vendor3;
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;
    char model_id[48];
} x86_def_t;

#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "core2duo",
        .level = 10,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* The original CPU also implements these features:
               CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* The original CPU also implements these ext features:
               CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
               CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Core(TM)2 Duo CPU     T7700  @ 2.40GHz",
    },
#endif
    {
        .name = "qemu32",
        .level = 2,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3,
        .xlevel = 0,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = 0x68747541, /* "Auth" */
        .vendor2 = 0x69746e65, /* "enti" */
        .vendor3 = 0x444d4163, /* "cAMD" */
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        /* XXX: put another string ? */
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "n270",
        /* original is on level 10 */
        .level = 5,
        .family = 6,
        .model = 28,
        .stepping = 2,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
            /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
             * CPUID_HT | CPUID_TM | CPUID_PBE */
            /* Some CPUs got no CPUID_SEP */
        .ext_features = CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
            /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
             * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270   @ 1.60GHz",
    },
};

static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    char *s = strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
    int family = -1, model = -1, stepping = -1;

    def = NULL;
    for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++) {
        if (strcmp(name, x86_defs[i].name) == 0) {
            def = &x86_defs[i];
            break;
        }
    }
    if (!def)
        goto error;
    memcpy(x86_cpu_def, def, sizeof(*def));

    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0; val++;
            if (!strcmp(featurestr, "family")) {
                char *err;
                family = strtol(val, &err, 10);
                if (!*val || *err || family < 0) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = family;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                model = strtol(val, &err, 10);
                if (!*val || *err || model < 0 || model > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = model;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                stepping = strtol(val, &err, 10);
                if (!*val || *err || stepping < 0 || stepping > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = stepping;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
                for(i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i    ]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    free(s);
    return 0;

error:
    free(s);
    return -1;
}
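
/* Example (illustration only): cpu_model = "qemu64,+ssse3,-nx,family=15"
 * copies the "qemu64" definition, ORs CPUID_EXT_SSSE3 into ext_features
 * through the plus_* accumulators, clears CPUID_EXT2_NX from ext2_features
 * through the minus_* accumulators, and overrides the family field with 15. */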

void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    unsigned int i;

    for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++)
        (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
}

static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_level = def->level;
    env->cpuid_version = (def->family << 8) | (def->model << 4) | def->stepping;
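    /* e.g. the default "qemu64" definition (family 6, model 2, stepping 3)
       yields cpuid_version = (6 << 8) | (2 << 4) | 3 = 0x0623 */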
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
    {
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
    return 0;
}

/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);

    env->eip = 0xfff0;
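    /* with the CS base of 0xffff0000 loaded above, the first instruction is
       fetched from physical address 0xfffffff0, the x86 reset vector */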
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;
}

void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = (~0x100000) | (a20_state << 20);
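        /* ~0x100000 clears only bit 20 of the mask, so with A20 disabled
           every physical address has bit 20 forced to zero, reproducing the
           8086-style 1 MB wrap-around */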
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
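    /* CR0.MP, CR0.EM and CR0.TS occupy consecutive bits starting at bit 1,
       so the single shift by HF_MP_SHIFT - 1 above is assumed to move all
       three into their hflags positions at once */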
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", new_cr4);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}

/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env, addr);
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
#if defined(USE_KQEMU)
#define PHYS_ADDR_MASK 0xfffff000LL
#else
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
#endif
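/* e.g. 0xfffffff000LL keeps physical address bits 12-39 (a 40-bit physical
   address space), while the non-64-bit value keeps bits 12-35 (36-bit) */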
/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif /* !CONFIG_USER_ONLY */