Statistics
| Branch: | Revision:

root / target-i386 / helper.c @ ef768138

History | View | Annotate | Download (56.8 kB)

1
/*
2
 *  i386 helpers (without register variable usage)
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
19
 */
20
#include <stdarg.h>
21
#include <stdlib.h>
22
#include <stdio.h>
23
#include <string.h>
24
#include <inttypes.h>
25
#include <signal.h>
26

    
27
#include "cpu.h"
28
#include "exec-all.h"
29
#include "qemu-common.h"
30
#include "kvm.h"
31

    
32
//#define DEBUG_MMU
33

    
34
/* feature flags taken from "Intel Processor Identification and the CPUID
35
 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
36
 * about feature names, the Linux name is used. */
37
/* CPUID leaf 1 EDX flag names, indexed by bit position (0..31).
   A NULL entry means the bit has no name recognized on the command line. */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
/* CPUID leaf 1 ECX flag names, same indexing scheme. */
static const char *ext_feature_name[] = {
    "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
    "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
    NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
       NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
/* CPUID leaf 0x80000001 EDX flag names (AMD extended features). */
static const char *ext2_feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
    "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
};
/* CPUID leaf 0x80000001 ECX flag names (AMD extended features). */
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
61

    
62
static void add_flagname_to_bitmaps(char *flagname, uint32_t *features, 
63
                                    uint32_t *ext_features, 
64
                                    uint32_t *ext2_features, 
65
                                    uint32_t *ext3_features)
66
{
67
    int i;
68
    int found = 0;
69

    
70
    for ( i = 0 ; i < 32 ; i++ ) 
71
        if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
72
            *features |= 1 << i;
73
            found = 1;
74
        }
75
    for ( i = 0 ; i < 32 ; i++ ) 
76
        if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
77
            *ext_features |= 1 << i;
78
            found = 1;
79
        }
80
    for ( i = 0 ; i < 32 ; i++ ) 
81
        if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
82
            *ext2_features |= 1 << i;
83
            found = 1;
84
        }
85
    for ( i = 0 ; i < 32 ; i++ ) 
86
        if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
87
            *ext3_features |= 1 << i;
88
            found = 1;
89
        }
90
    if (!found) {
91
        fprintf(stderr, "CPU feature %s not found\n", flagname);
92
    }
93
}
94

    
95
/* Clear every bit in *features that is absent from the "supported" mask
 * reported by the kernel.  The "names" table is accepted for symmetry
 * with the feature tables above but is not consulted here. */
static void kvm_trim_features(uint32_t *features, uint32_t supported,
                              const char *names[])
{
    int bit = 0;

    while (bit < 32) {
        uint32_t flag = 1U << bit;

        if ((*features & flag) && !(supported & flag))
            *features &= ~flag;
        ++bit;
    }
}
108

    
109
/* One built-in CPU model: everything needed to synthesize the CPUID
   responses for a "-cpu" selection. */
typedef struct x86_def_t {
    const char *name;       /* model name matched against "-cpu <name>" */
    uint32_t level;         /* value copied to env->cpuid_level */
    uint32_t vendor1, vendor2, vendor3;  /* vendor id, 4 chars per word
                                            (see the "Auth"/"enti"/"cAMD"
                                            entry below) */
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;        /* value copied to env->cpuid_xlevel */
    char model_id[48];      /* brand string, packed into cpuid_model[] */
    int vendor_override;    /* set to 1 when "vendor=" is given explicitly */
} x86_def_t;
121

    
122
/* Feature sets of the classic Intel models; each of the PENTIUM*
   definitions builds on the previous one. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
/* Base feature set shared by the P6-class and later models below. */
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
133
/* Built-in CPU models selectable with "-cpu <name>".  The 64-bit-capable
   models are only compiled into the x86_64 target. */
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "phenom",
        .level = 5,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 16,
        .model = 2,
        .stepping = 3,
        /* Missing: CPUID_VME, CPUID_HT */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* Missing: CPUID_EXT_CX16, CPUID_EXT_POPCNT */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
    },
    {
        .name = "core2duo",
        .level = 10,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* The original CPU also implements these features:
               CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* The original CPU also implements these ext features:
               CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
               CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU     T7700  @ 2.40GHz",
    },
#endif
    {
        .name = "qemu32",
        .level = 2,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3,
        .xlevel = 0,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "coreduo",
        .level = 10,
        .family = 6,
        .model = 14,
        .stepping = 8,
        /* The original CPU also implements these features:
               CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
        /* The original CPU also implements these ext features:
               CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
               CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .ext2_features = CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU           T2600  @ 2.16GHz",
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = 0x68747541, /* "Auth" */
        .vendor2 = 0x69746e65, /* "enti" */
        .vendor3 = 0x444d4163, /* "cAMD" */
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        /* XXX: put another string ? */
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "n270",
        /* original is on level 10 */
        .level = 5,
        .family = 6,
        .model = 28,
        .stepping = 2,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
            /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
             * CPUID_HT | CPUID_TM | CPUID_PBE */
            /* Some CPUs got no CPUID_SEP */
        .ext_features = CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
            /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
             * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270   @ 1.60GHz",
    },
};
310

    
311
/* Parse a "-cpu" option string of the form
 *     name[,+flag][,-flag][,prop=value]...
 * into *x86_cpu_def, starting from the built-in model matching "name".
 * "+flag"/"-flag" set or clear a CPUID feature bit; recognized
 * properties are family, model, stepping, vendor and model_id.
 *
 * Returns 0 on success, -1 on any error (a diagnostic is printed to
 * stderr).
 */
static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    char *s = strdup(cpu_model);
    char *featurestr, *name;
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
    int family = -1, model = -1, stepping = -1;

    /* A failed strdup() would otherwise make strtok() dereference NULL. */
    if (!s)
        return -1;
    name = strtok(s, ",");

    /* Find the base model. */
    def = NULL;
    for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
        if (strcmp(name, x86_defs[i].name) == 0) {
            def = &x86_defs[i];
            break;
        }
    }
    if (!def)
        goto error;
    memcpy(x86_cpu_def, def, sizeof(*def));

    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0; val++;
            if (!strcmp(featurestr, "family")) {
                char *err;
                family = strtol(val, &err, 10);
                if (!*val || *err || family < 0) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = family;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                model = strtol(val, &err, 10);
                if (!*val || *err || model < 0 || model > 0xff) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = model;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                stepping = strtol(val, &err, 10);
                if (!*val || *err || stepping < 0 || stepping > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = stepping;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                /* Pack the 12-character vendor id into three registers,
                   4 characters each, low byte first. */
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
                for(i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i    ]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
                x86_cpu_def->vendor_override = 1;
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    /* Apply additions first, then removals, so "-foo" wins over "+foo". */
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    free(s);
    return 0;

error:
    free(s);
    return -1;
}
409

    
410
void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
411
{
412
    unsigned int i;
413

    
414
    for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
415
        (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
416
}
417

    
418
/* Resolve "cpu_model" and program all CPUID-related fields of "env"
 * accordingly.  Returns 0 on success, -1 if the model string could not
 * be parsed. */
static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        /* Model carries no vendor id: default to GenuineIntel. */
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_vendor_override = def->vendor_override;
    env->cpuid_level = def->level;
    /* Build the CPUID leaf 1 EAX value.  Families above 0x0f go into
       the extended-family field (bits 20..27) with the base family
       pinned at 0x0f. */
    if (def->family > 0x0f)
        env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
    else
        env->cpuid_version = def->family << 8;
    /* Model: low nibble in bits 4..7, high nibble in the extended-model
       field (bits 16..19); stepping in bits 0..3. */
    env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
    env->cpuid_version |= def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;  /* architectural PAT MSR reset value */
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
    {
        /* Pack the brand string into the twelve 32-bit cpuid_model
           words, little-endian within each word, NUL-padded to 48
           bytes. */
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
    return 0;
}
463

    
464
/* NOTE: must be called outside the CPU execute loop */
465
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    /* Zero everything up to (but not including) the breakpoint lists,
       which must survive a reset. */
    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;  /* SVM global interrupt flag on */

    cpu_x86_update_cr0(env, 0x60000010);  /* CR0 reset value: CD, NW, ET */
    env->a20_mask = ~0x0;                 /* A20 gate open */
    env->smbase = 0x30000;                /* default SMRAM base */

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);   /* type 2: LDT */
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);   /* type 11: busy 32-bit TSS */

    /* Real-mode segments.  CS gets the architectural reset base
       0xffff0000 with selector 0xf000; the rest start at base 0. */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);

    env->eip = 0xfff0;                      /* reset vector offset */
    env->regs[R_EDX] = env->cpuid_version;  /* EDX holds family/model/stepping at reset */

    env->eflags = 0x2;  /* only the always-one bit 1 set */

    /* FPU init: every stack slot tagged empty, default control word. */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;  /* SSE default: all exceptions masked */

    /* Debug registers: clear but keep the architectural fixed-one bits,
       and drop any CPU-owned breakpoints/watchpoints. */
    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
}
529

    
530
/* Free a CPU state structure.  The caller must not use "env" afterwards. */
void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}
534

    
535
/***********************************************************/
536
/* x86 debug */
537

    
538
/* Printable names of the lazy condition-code computation states.
   Indexed directly by env->cc_op in cpu_dump_state(), so the order must
   stay in sync with the CC_OP_* enumeration. */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
592

    
593
static void
594
cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
595
                       int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
596
                       const char *name, struct SegmentCache *sc)
597
{
598
#ifdef TARGET_X86_64
599
    if (env->hflags & HF_CS64_MASK) {
600
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
601
                    sc->selector, sc->base, sc->limit, sc->flags);
602
    } else
603
#endif
604
    {
605
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
606
                    (uint32_t)sc->base, sc->limit, sc->flags);
607
    }
608

    
609
    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
610
        goto done;
611

    
612
    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
613
    if (sc->flags & DESC_S_MASK) {
614
        if (sc->flags & DESC_CS_MASK) {
615
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
616
                           ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
617
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
618
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
619
        } else {
620
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
621
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
622
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
623
        }
624
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
625
    } else {
626
        static const char *sys_type_name[2][16] = {
627
            { /* 32 bit mode */
628
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
629
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
630
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
631
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
632
            },
633
            { /* 64 bit mode */
634
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
635
                "Reserved", "Reserved", "Reserved", "Reserved",
636
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
637
                "Reserved", "IntGate64", "TrapGate64"
638
            }
639
        };
640
        cpu_fprintf(f, sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
641
                                    [(sc->flags & DESC_TYPE_MASK)
642
                                     >> DESC_TYPE_SHIFT]);
643
    }
644
done:
645
    cpu_fprintf(f, "\n");
646
}
647

    
648
/* Dump the full architectural CPU state (general registers, segments,
 * control/debug registers and, depending on "flags", the lazy condition
 * codes and the FPU/SSE state) through the fprintf-like callback. */
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    /* Under KVM the register state lives in the kernel; fetch it first. */
    if (kvm_enabled())
        kvm_arch_get_registers(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        /* dr[] is target_ulong: truncate explicitly, as the other lines
           in this branch do, so the "%08x" varargs stay well-defined
           even when target_ulong is 64 bits wide. */
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%08x ", i, (uint32_t)env->dr[i]);
        cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n",
                    (uint32_t)env->dr[6], (uint32_t)env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        /* Name the lazy condition-code state; fall back to the raw
           number for an out-of-range value. */
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        /* FTW: one bit per x87 stack slot, set when the slot is valid
           (inverse of the internal fptags convention). */
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            /* Split the 80-bit register into mantissa and sign/exponent
               halves via a union for printing. */
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        /* 16 XMM registers in 64-bit mode, 8 otherwise. */
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}
833

    
834
/***********************************************************/
835
/* x86 mmu */
836
/* XXX: add PGE support */
837

    
838
/* Set the state of the A20 address-line gate.  a20_state is treated as a
   boolean: non-zero opens the gate (full address bus), zero forces address
   bit 20 low for 8086 wrap-around compatibility.  Changing the gate
   invalidates every cached translation, so running TBs are unlinked and
   the whole TLB is flushed. */
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    int new_state = (a20_state != 0);

    /* nothing to do if the gate is already in the requested state */
    if (new_state == ((env->a20_mask >> 20) & 1))
        return;

#if defined(DEBUG_MMU)
    printf("A20 update: a20=%d\n", new_state);
#endif
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

    /* when a20 is changed, all the MMU mappings are invalid, so
       we must flush everything */
    tlb_flush(env, 1);
    env->a20_mask = (~0x100000) | (new_state << 20);
}
855

    
856
/* Install a new CR0 value: flush the TLB when paging-related bits change,
   handle long-mode entry/exit on x86-64, and resynchronize the cached
   hflags copies of the PE/ADDSEG and FPU (MP/EM/TS) state. */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* any change to PG, WP or PE invalidates all cached translations */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode: paging enabled while EFER.LME is set */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;   /* long mode requires PAE; silently refuse for now */
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode: paging disabled while LMA is active */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;   /* EIP truncates to 32 bits outside long mode */
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;   /* CR0.ET reads as 1 on 486+ */

    /* update PE flag in hidden flags (CR0.PE is bit 0, so no shift-down
       is needed before shifting into position) */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags: CR0.MP/EM/TS sit one bit position below their
       hflags counterparts, so a single shift copies all three at once */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
896

    
897
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
898
   the PDPT */
899
/* Install a new CR3 (page-table base).  When paging is enabled the TLB
   must be flushed since the whole page-table hierarchy may have changed.
   NOTE(review): the '0' argument to tlb_flush, unlike the '1' used on the
   CR0/CR4 paths, presumably preserves global entries — confirm against
   tlb_flush's definition. */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}
909

    
910
/* Install a new CR4 value: flush the TLB when any paging-related bit
   changes, clamp OSFXSR to the CPU's advertised SSE capability, and keep
   the cached HF_OSFXSR hflag in sync. */
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
    const uint32_t mmu_bits = CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK;

#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    /* any toggled PGE/PAE/PSE bit invalidates all cached translations */
    if ((new_cr4 ^ env->cr[4]) & mmu_bits)
        tlb_flush(env, 1);

    /* SSE handling: OSFXSR may only be set when SSE is advertised */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;

    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}
929

    
930
#if defined(CONFIG_USER_ONLY)
931

    
932
/* user mode only emulation: there is no MMU, so every access faults as a
   user-level page fault (return 1 = deliver #PF to the guest handler). */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    env->cr[2] = addr;
    /* fault is always from user mode; keep only the write bit */
    env->error_code = ((is_write & 1) << PG_ERROR_W_BIT) | PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
943

    
944
/* user mode emulation has no MMU: virtual and physical addresses are
   identical */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
948

    
949
#else
950

    
951
/* XXX: This value should match the one returned by CPUID
952
 * and in exec.c */
953
#if defined(CONFIG_KQEMU)
954
#define PHYS_ADDR_MASK 0xfffff000LL
955
#else
956
# if defined(TARGET_X86_64)
957
# define PHYS_ADDR_MASK 0xfffffff000LL
958
# else
959
# define PHYS_ADDR_MASK 0xffffff000LL
960
# endif
961
#endif
962

    
963
/* return value:
964
   -1 = cannot handle fault
965
   0  = nothing more to do
966
   1  = generate PF fault
967
   2  = soft MMU activation required for this block
968
*/
969
/* Walk the guest page tables for 'addr' and install the translation in
   the TLB, updating accessed/dirty bits along the way.  Handles all three
   paging modes: no paging (identity), PAE/long mode (64-bit entries), and
   classic 32-bit two-level paging.  On a fault, error_code/exception_index
   are set up for #PF (or #GP for a non-canonical long-mode address).
   NOTE(review): is_write1 == 2 apparently denotes an instruction fetch —
   it is the value checked against the NX bit; confirm against callers. */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: identity map with full permissions */
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            /* long mode: 4-level walk starting at the PML4 */
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension (canonical form) */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                /* non-canonical addresses raise #GP, not #PF */
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* NX set while EFER.NXE is clear is a reserved-bit violation */
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            /* ptep accumulates the combined permissions; NX is inverted so
               that AND-ing levels yields "executable only if all allow it" */
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* 32-bit PAE: the 4-entry PDPT is indexed by addr[31:30] */
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* PAE PDPTEs carry no permission bits: start fully permissive */
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page: the PDE is the leaf entry */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;   /* flip back: ptep's NX bit is now real NX */
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                /* supervisor writes honor read-only pages only if CR0.WP */
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        /* classic 32-bit two-level paging (32-bit entries) */
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
1258

    
1259
/* Translate a guest virtual address to a physical address for debugger
   use.  Performs the same page-table walk as the MMU fault handler but
   read-only: no accessed/dirty bits are set, no permissions are checked,
   and no fault is raised.  Returns -1 if the address is not mapped. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            /* long mode: 4-level walk starting at the PML4 */
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension (canonical form) */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            /* 32-bit PAE: the 4-entry PDPT is indexed by addr[31:30] */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        /* classic 32-bit paging, or paging disabled */
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            /* paging disabled: identity mapping */
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
1352

    
1353
/* Mirror debug register DR<index> into a QEMU breakpoint or watchpoint,
   dispatching on the DR7 type field (0 = instruction, 1 = data write,
   2 = I/O, 3 = data read/write).  On insertion failure the cached
   breakpoint reference is cleared so hw_breakpoint_remove() is a no-op. */
void hw_breakpoint_insert(CPUState *env, int index)
{
    int type, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        /* instruction breakpoint: only inserted while enabled in DR7 */
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        /* data-write watchpoint */
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
         /* No support for I/O watchpoints yet */
        break;
    case 3:
        /* data read/write watchpoint; falls into the shared insert path */
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    if (err)
        env->cpu_breakpoint[index] = NULL;
}
1380

    
1381
/* Undo hw_breakpoint_insert() for DR<index>: remove the QEMU breakpoint
   or watchpoint that mirrors it, if one was installed. */
void hw_breakpoint_remove(CPUState *env, int index)
{
    /* nothing was installed (or insertion failed) */
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        /* instruction breakpoint */
        if (hw_breakpoint_enabled(env->dr[7], index))
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        break;
    case 1:
    case 3:
        /* data watchpoint (write or read/write) — same removal path */
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
}
1399

    
1400
/* Scan DR0-DR3 for hits: an instruction breakpoint whose address matches
   EIP, or a data watchpoint flagged BP_WATCHPOINT_HIT.  The corresponding
   B0-B3 bits are collected into a candidate DR6 value, which is written
   back when an enabled breakpoint hit (or when force_dr6_update is set).
   Returns non-zero iff at least one *enabled* breakpoint hit. */
int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong new_dr6 = env->dr[6] & ~0xf;   /* clear B0-B3 */
    int hit_enabled = 0;
    int i;

    for (i = 0; i < 4; i++) {
        int bp_type = hw_breakpoint_type(env->dr[7], i);
        int hit;

        if (bp_type == 0) {
            /* instruction breakpoint: compare against current EIP */
            hit = (env->dr[i] == env->eip);
        } else {
            /* data watchpoint (type 1 or 3): check the hit flag */
            hit = (bp_type & 1) && env->cpu_watchpoint[i] &&
                  (env->cpu_watchpoint[i]->flags & BP_WATCHPOINT_HIT);
        }
        if (hit) {
            new_dr6 |= 1 << i;
            if (hw_breakpoint_enabled(env->dr[7], i))
                hit_enabled = 1;
        }
    }

    if (hit_enabled || force_dr6_update)
        env->dr[6] = new_dr6;
    return hit_enabled;
}
1421

    
1422
static CPUDebugExcpHandler *prev_debug_excp_handler;
1423

    
1424
void raise_exception(int exception_index);
1425

    
1426
/* Debug exception callback: when a QEMU breakpoint/watchpoint fires,
   decide whether it corresponds to an architectural (DR7-programmed)
   one and, if so, raise #DB in the guest; otherwise resume execution.
   Always chains to the previously installed handler (e.g. the gdbstub). */
static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        /* a data watchpoint fired */
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception(EXCP01_DB);
            else
                /* hit an architectural watchpoint that is not enabled:
                   silently restart the instruction */
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        /* an instruction breakpoint fired: find the one at EIP */
        TAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    /* force DR6 update so the guest sees which DR hit */
                    check_hw_breakpoints(env, 1);
                    raise_exception(EXCP01_DB);
                }
                break;
            }
    }
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}
1451
#endif /* !CONFIG_USER_ONLY */
1452

    
1453
/* Execute the CPUID instruction on the *host* CPU for the given leaf
   (function) and subleaf (count), storing the results through any of
   eax/ebx/ecx/edx that are non-NULL.  Only does real work when KVM
   support is compiled in; otherwise the outputs are left untouched. */
static void host_cpuid(uint32_t function, uint32_t count,
                       uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#else
    /* 32-bit host: pusha/popa save all registers around cpuid, so %ebx
       (presumably reserved as the PIC register — hence not usable as an
       asm output here) is preserved; results go out through memory */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
#endif
}
1487

    
1488
/* Emulate the guest-visible CPUID instruction for leaf 'index' and
   subleaf 'count', filling eax/ebx/ecx/edx from the configured CPU model
   in 'env'.  Some leaves are adjusted when running under KVM (host vendor
   pass-through, hypervisor bit, feature clamping to host capability). */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    /* test if maximum index reached: out-of-range leaves fall back to the
       highest basic leaf, mirroring real hardware behavior */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* vendor string and highest basic leaf */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;

        /* sysenter isn't supported in compatibility mode on AMD, and
         * syscall isn't supported in compatibility mode on Intel, so
         * advertise the actual host CPU vendor — and say goodbye to
         * migration between different vendors if you use compatibility
         * mode. */
        if (kvm_enabled() && !env->cpuid_vendor_override)
            host_cpuid(0, 0, NULL, ebx, ecx, edx);
        break;
    case 1:
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;

        /* "Hypervisor present" bit required for Microsoft SVVP */
        if (kvm_enabled())
            *ecx |= (1 << 31);
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        *eax = 1;
        *ebx = 0;
        *ecx = 0;
        *edx = 0x2c307d;
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        switch (count) {
            case 0: /* L1 dcache info */
                *eax = 0x0000121;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 1: /* L1 icache info */
                *eax = 0x0000122;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 2: /* L2 cache info */
                *eax = 0x0000143;
                *ebx = 0x3c0003f;
                *ecx = 0x0000fff;
                *edx = 0x0000001;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        /* highest extended leaf and vendor string */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        /* extended feature flags */
        *eax = env->cpuid_features;
        *ebx = 0;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        if (kvm_enabled()) {
            uint32_t h_eax, h_edx;

            host_cpuid(index, 0, &h_eax, NULL, NULL, &h_edx);

            /* disable CPU features that the host does not support */

            /* long mode */
            if ((h_edx & 0x20000000) == 0 /* || !lm_capable_kernel */)
                *edx &= ~0x20000000;
            /* syscall */
            if ((h_edx & 0x00000800) == 0)
                *edx &= ~0x00000800;
            /* nx */
            if ((h_edx & 0x00100000) == 0)
                *edx &= ~0x00100000;

            /* disable CPU features that KVM cannot support */

            /* svm */
            *ecx &= ~4UL;
            /* 3dnow */
            *edx &= ~0xc0000000;
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        *eax = 0x01ff01ff;
        *ebx = 0x01ff01ff;
        *ecx = 0x40020140;
        *edx = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        *eax = 0;
        *ebx = 0x42004200;
        *ecx = 0x02008140;
        *edx = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
/* XXX: This value must match the one used in the MMU code. */ 
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
#if defined(CONFIG_KQEMU)
            *eax = 0x00003020;        /* 48 bits virtual, 32 bits physical */
#else
/* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028;        /* 48 bits virtual, 40 bits physical */
#endif
        } else {
#if defined(CONFIG_KQEMU)
            *eax = 0x00000020;        /* 32 bits physical */
#else
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
#endif
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000000A:
        /* SVM capability leaf */
        *eax = 0x00000001; /* SVM Revision */
        *ebx = 0x00000010; /* nr of ASIDs */
        *ecx = 0;
        *edx = 0; /* optional features */
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
1688

    
1689
CPUX86State *cpu_x86_init(const char *cpu_model)
1690
{
1691
    CPUX86State *env;
1692
    static int inited;
1693

    
1694
    env = qemu_mallocz(sizeof(CPUX86State));
1695
    cpu_exec_init(env);
1696
    env->cpu_model_str = cpu_model;
1697

    
1698
    /* init various static tables */
1699
    if (!inited) {
1700
        inited = 1;
1701
        optimize_flags_init();
1702
#ifndef CONFIG_USER_ONLY
1703
        prev_debug_excp_handler =
1704
            cpu_set_debug_excp_handler(breakpoint_handler);
1705
#endif
1706
    }
1707
    if (cpu_x86_register(env, cpu_model) < 0) {
1708
        cpu_x86_close(env);
1709
        return NULL;
1710
    }
1711
    cpu_reset(env);
1712
#ifdef CONFIG_KQEMU
1713
    kqemu_init(env);
1714
#endif
1715

    
1716
    qemu_init_vcpu(env);
1717

    
1718
    if (kvm_enabled()) {
1719
        kvm_trim_features(&env->cpuid_features,
1720
                          kvm_arch_get_supported_cpuid(env, 1, R_EDX),
1721
                          feature_name);
1722
        kvm_trim_features(&env->cpuid_ext_features,
1723
                          kvm_arch_get_supported_cpuid(env, 1, R_ECX),
1724
                          ext_feature_name);
1725
        kvm_trim_features(&env->cpuid_ext2_features,
1726
                          kvm_arch_get_supported_cpuid(env, 0x80000001, R_EDX),
1727
                          ext2_feature_name);
1728
        kvm_trim_features(&env->cpuid_ext3_features,
1729
                          kvm_arch_get_supported_cpuid(env, 0x80000001, R_ECX),
1730
                          ext3_feature_name);
1731
    }
1732

    
1733
    return env;
1734
}