/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"
#include "svm.h"
#include "qemu-common.h"
#include "kvm.h"

//#define DEBUG_MMU

static int cpu_x86_register (CPUX86State *env, const char *cpu_model);

static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    int i;
    /* feature flags taken from "Intel Processor Identification and the CPUID
     * Instruction" and AMD's "CPUID Specification". In cases of disagreement
     * about feature names, the Linux name is used. */
    static const char *feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
    };
    static const char *ext_feature_name[] = {
       "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
       "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
       NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
       NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    static const char *ext2_feature_name[] = {
       "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
       "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
       "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
       "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
    };
    static const char *ext3_feature_name[] = {
       "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
       "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
       NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
       NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };

    for ( i = 0 ; i < 32 ; i++ )
        if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
            *features |= 1 << i;
            return;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
            *ext_features |= 1 << i;
            return;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
            *ext2_features |= 1 << i;
            return;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
            *ext3_features |= 1 << i;
            return;
        }
    fprintf(stderr, "CPU feature %s not found\n", flagname);
}

CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    if (kvm_enabled())
        kvm_init_vcpu(env);
    return env;
}

typedef struct x86_def_t {
    const char *name;
    uint32_t level;
    uint32_t vendor1, vendor2, vendor3;
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;
    char model_id[48];
} x86_def_t;

#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "core2duo",
        .level = 10,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* The original CPU also implements these features:
               CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* The original CPU also implements these ext features:
               CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
               CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU     T7700  @ 2.40GHz",
    },
#endif
    {
        .name = "qemu32",
        .level = 2,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3,
        .xlevel = 0,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "coreduo",
        .level = 10,
        .family = 6,
        .model = 14,
        .stepping = 8,
        /* The original CPU also implements these features:
               CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
        /* The original CPU also implements these ext features:
               CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
               CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .ext2_features = CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU           T2600  @ 2.16GHz",
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = 0x68747541, /* "Auth" */
        .vendor2 = 0x69746e65, /* "enti" */
        .vendor3 = 0x444d4163, /* "cAMD" */
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        /* XXX: put another string ? */
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "n270",
        /* original is on level 10 */
        .level = 5,
        .family = 6,
        .model = 28,
        .stepping = 2,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
            /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
             * CPUID_HT | CPUID_TM | CPUID_PBE */
            /* Some CPUs got no CPUID_SEP */
        .ext_features = CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
            /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
             * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270   @ 1.60GHz",
    },
};

/* Fill *x86_cpu_def from a CPU model string of the form
   "name[,+feature][,-feature][,key=value]...".  Returns 0 on success,
   -1 if the model name or a feature string is not recognized. */
static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    char *s = strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
    int family = -1, model = -1, stepping = -1;

    def = NULL;
    for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++) {
        if (strcmp(name, x86_defs[i].name) == 0) {
            def = &x86_defs[i];
            break;
        }
    }
    if (!def)
        goto error;
    memcpy(x86_cpu_def, def, sizeof(*def));

    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0; val++;
            if (!strcmp(featurestr, "family")) {
                char *err;
                family = strtol(val, &err, 10);
                if (!*val || *err || family < 0) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = family;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                model = strtol(val, &err, 10);
                if (!*val || *err || model < 0 || model > 0xff) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = model;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                stepping = strtol(val, &err, 10);
                if (!*val || *err || stepping < 0 || stepping > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = stepping;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
                for(i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i    ]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    free(s);
    return 0;

error:
    free(s);
    return -1;
}

void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    unsigned int i;

    for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++)
        (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
}

static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_level = def->level;
    if (def->family > 0x0f)
        env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
    else
        env->cpuid_version = def->family << 8;
    env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
    env->cpuid_version |= def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
    {
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
    return 0;
}

/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;
}

void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = (~0x100000) | (a20_state << 20);
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}

/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env, addr);
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
#if defined(USE_KQEMU)
#define PHYS_ADDR_MASK 0xfffff000LL
#else
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
#endif

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif /* !CONFIG_USER_ONLY */

/* Query CPUID on the host processor.  Only compiled in when CONFIG_KVM
   is defined; otherwise the output registers are left untouched. */
static void host_cpuid(uint32_t function, uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function) : "cc");
#else
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%1) \n\t"
                 "mov %%ebx, 4(%1) \n\t"
                 "mov %%ecx, 8(%1) \n\t"
                 "mov %%edx, 12(%1) \n\t"
                 "popa"
                 : : "a"(function), "S"(vec)
                 : "memory", "cc");
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
#endif
}

void cpu_x86_cpuid(CPUX86State *env, uint32_t index,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;

        /* sysenter isn't supported in compatibility mode on AMD, and syscall
         * isn't supported in compatibility mode on Intel, so advertise the
         * actual CPU, and say goodbye to migration between different vendors
         * if you use compatibility mode. */
        if (kvm_enabled())
            host_cpuid(0, NULL, ebx, ecx, edx);
        break;
    case 1:
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;

        /* "Hypervisor present" bit required for Microsoft SVVP */
        if (kvm_enabled())
            *ecx |= (1 << 31);
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        *eax = 1;
        *ebx = 0;
        *ecx = 0;
        *edx = 0x2c307d;
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        switch (*ecx) {
            case 0: /* L1 dcache info */
                *eax = 0x0000121;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 1: /* L1 icache info */
                *eax = 0x0000122;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 2: /* L2 cache info */
                *eax = 0x0000143;
                *ebx = 0x3c0003f;
                *ecx = 0x0000fff;
                *edx = 0x0000001;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
        }

        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_features;
        *ebx = 0;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        if (kvm_enabled()) {
            uint32_t h_eax, h_edx;

            host_cpuid(0x80000001, &h_eax, NULL, NULL, &h_edx);

            /* disable CPU features that the host does not support */

            /* long mode */
            if ((h_edx & 0x20000000) == 0 /* || !lm_capable_kernel */)
                *edx &= ~0x20000000;
            /* syscall */
            if ((h_edx & 0x00000800) == 0)
                *edx &= ~0x00000800;
            /* nx */
            if ((h_edx & 0x00100000) == 0)
                *edx &= ~0x00100000;

            /* disable CPU features that KVM cannot support */

            /* svm */
            *ecx &= ~4UL;
            /* 3dnow */
            *edx &= ~0xc0000000;
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        *eax = 0x01ff01ff;
        *ebx = 0x01ff01ff;
        *ecx = 0x40020140;
        *edx = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        *eax = 0;
        *ebx = 0x42004200;
        *ecx = 0x02008140;
        *edx = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
#if defined(USE_KQEMU)
            *eax = 0x00003020;        /* 48 bits virtual, 32 bits physical */
#else
            /* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028;        /* 48 bits virtual, 40 bits physical */
#endif
        } else {
#if defined(USE_KQEMU)
            *eax = 0x00000020;        /* 32 bits physical */
#else
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
#endif
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000000A:
        *eax = 0x00000001; /* SVM Revision */
        *ebx = 0x00000010; /* nr of ASIDs */
        *ecx = 0;
        *edx = 0; /* optional features */
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}