Statistics
| Branch: | Revision:

root / target-i386 / helper.c @ fe4bce09

History | View | Annotate | Download (58.9 kB)

1
/*
2
 *  i386 helpers (without register variable usage)
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
19
 */
20
#include <stdarg.h>
21
#include <stdlib.h>
22
#include <stdio.h>
23
#include <string.h>
24
#include <inttypes.h>
25
#include <signal.h>
26

    
27
#include "cpu.h"
28
#include "exec-all.h"
29
#include "qemu-common.h"
30
#include "kvm.h"
31

    
32
//#define DEBUG_MMU
33

    
34
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
 * about feature names, the Linux name is used. */
/* Each table maps bit positions 0..31 of one CPUID feature word to the
 * flag name accepted on the -cpu command line; NULL marks bits with no
 * recognised flag.  The array index IS the bit number, so entries must
 * never be reordered. */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
/* CPUID.1:ECX feature names. */
static const char *ext_feature_name[] = {
    "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
    "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
    NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, "hypervisor",
};
/* CPUID.0x80000001:EDX feature names (AMD extended features; the low bits
 * duplicate the standard feature word). */
static const char *ext2_feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
    "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
};
/* CPUID.0x80000001:ECX feature names. */
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
61

    
62
static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
63
                                    uint32_t *ext_features,
64
                                    uint32_t *ext2_features,
65
                                    uint32_t *ext3_features)
66
{
67
    int i;
68
    int found = 0;
69

    
70
    for ( i = 0 ; i < 32 ; i++ )
71
        if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
72
            *features |= 1 << i;
73
            found = 1;
74
        }
75
    for ( i = 0 ; i < 32 ; i++ )
76
        if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
77
            *ext_features |= 1 << i;
78
            found = 1;
79
        }
80
    for ( i = 0 ; i < 32 ; i++ )
81
        if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
82
            *ext2_features |= 1 << i;
83
            found = 1;
84
        }
85
    for ( i = 0 ; i < 32 ; i++ )
86
        if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
87
            *ext3_features |= 1 << i;
88
            found = 1;
89
        }
90
    if (!found) {
91
        fprintf(stderr, "CPU feature %s not found\n", flagname);
92
    }
93
}
94

    
95
/* Clear every bit of *features that is not present in supported.  The
 * names[] table is accepted for symmetry with the bitmap helpers but is
 * not consulted here. */
static void kvm_trim_features(uint32_t *features, uint32_t supported,
                              const char *names[])
{
    int bit;

    for (bit = 0; bit < 32; ++bit) {
        uint32_t flag = 1U << bit;

        if ((*features & flag) && !(supported & flag)) {
            *features &= ~flag;
        }
    }
}
108

    
109
/* Static description of one emulated CPU model; used to seed the
 * CPUID-related fields of the CPU state in cpu_x86_register(). */
typedef struct x86_def_t {
    const char *name;       /* -cpu model name */
    uint32_t level;         /* highest basic CPUID leaf (CPUID.0:EAX) */
    uint32_t vendor1, vendor2, vendor3; /* vendor string in EBX/EDX/ECX order */
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;        /* highest extended CPUID leaf (0x80000000:EAX) */
    char model_id[48];      /* brand string for leaves 0x80000002..4 */
    int vendor_override;    /* non-zero when the user forced a vendor string */
} x86_def_t;
121

    
122
/* Cumulative CPUID.1:EDX feature sets for the generic Intel model lines;
 * each generation builds on the previous one. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
133
/* Built-in CPU model table searched by name in cpu_x86_find_by_name().
 * 64-bit-only models are compiled in only for TARGET_X86_64 builds. */
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "phenom",
        .level = 5,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 16,
        .model = 2,
        .stepping = 3,
        /* Missing: CPUID_VME, CPUID_HT */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* Missing: CPUID_EXT_CX16, CPUID_EXT_POPCNT */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
    },
    {
        .name = "core2duo",
        .level = 10,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* The original CPU also implements these features:
               CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* The original CPU also implements these ext features:
               CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
               CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU     T7700  @ 2.40GHz",
    },
#endif
    {
        .name = "qemu32",
        .level = 2,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3,
        .xlevel = 0,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "coreduo",
        .level = 10,
        .family = 6,
        .model = 14,
        .stepping = 8,
        /* The original CPU also implements these features:
               CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
        /* The original CPU also implements these ext features:
               CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
               CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .ext2_features = CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU           T2600  @ 2.16GHz",
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        /* XXX: put another string ? */
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "n270",
        /* original is on level 10 */
        .level = 5,
        .family = 6,
        .model = 28,
        .stepping = 2,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
            /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
             * CPUID_HT | CPUID_TM | CPUID_PBE */
            /* Some CPUs got no CPUID_SEP */
        .ext_features = CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
            /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
             * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270   @ 1.60GHz",
    },
};
310

    
311
static void host_cpuid(uint32_t function, uint32_t count, uint32_t *eax,
312
                               uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
313

    
314
/* Fill str (at least 48 bytes) with the host CPU's brand string, read from
 * CPUID leaves 0x80000002..0x80000004, 16 bytes (EAX..EDX) per leaf.
 * Always returns 0. */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4];
    int leaf;

    for (leaf = 0; leaf < 3; leaf++) {
        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        /* regs[] is contiguous, so one copy lays out EAX..EDX in order. */
        memcpy(str + leaf * 16, regs, sizeof(regs));
    }
    return 0;
}
328

    
329
/* Populate *x86_cpu_def from the host processor's own CPUID leaves, for
 * "-cpu host".  Always returns 0. */
static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
{
    uint32_t a, b, c, d;

    x86_cpu_def->name = "host";

    /* Leaf 0: highest basic leaf plus the vendor string (EBX/EDX/ECX). */
    host_cpuid(0x0, 0, &a, &b, &c, &d);
    x86_cpu_def->level = a;
    x86_cpu_def->vendor1 = b;
    x86_cpu_def->vendor2 = d;
    x86_cpu_def->vendor3 = c;

    /* Leaf 1: family/model/stepping (with the extended fields folded in)
     * and the two standard feature words. */
    host_cpuid(0x1, 0, &a, &b, &c, &d);
    x86_cpu_def->family = ((a >> 8) & 0x0F) + ((a >> 20) & 0xFF);
    x86_cpu_def->model = ((a >> 4) & 0x0F) | ((a & 0xF0000) >> 12);
    x86_cpu_def->stepping = a & 0x0F;
    x86_cpu_def->ext_features = c;
    x86_cpu_def->features = d;

    /* Leaf 0x80000000: highest extended leaf. */
    host_cpuid(0x80000000, 0, &a, &b, &c, &d);
    x86_cpu_def->xlevel = a;

    /* Leaf 0x80000001: extended feature words; then the brand string. */
    host_cpuid(0x80000001, 0, &a, &b, &c, &d);
    x86_cpu_def->ext2_features = d;
    x86_cpu_def->ext3_features = c;
    cpu_x86_fill_model_id(x86_cpu_def->model_id);
    x86_cpu_def->vendor_override = 0;

    return 0;
}
358

    
359
/* Parse a -cpu model string of the form
 *     name[,+flag][,-flag][,prop=value]...
 * and fill in *x86_cpu_def.  "name" is either one of the built-in models
 * in x86_defs[] or "host" (copy the host CPU's definition).  "+flag"/"-flag"
 * turn individual CPUID feature bits on/off; prop=value overrides family,
 * model, stepping, vendor or model_id.  Returns 0 on success, -1 on any
 * parse error (diagnostics go to stderr). */
static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    /* strtok mutates its input, so work on a private copy. */
    char *s = strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
    int family = -1, model = -1, stepping = -1;

    /* Resolve the base model: a table entry, or the host CPU for "host". */
    def = NULL;
    for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
        if (strcmp(name, x86_defs[i].name) == 0) {
            def = &x86_defs[i];
            break;
        }
    }
    if (!def) {
        if (strcmp(name, "host") != 0) {
            goto error;
        }
        cpu_x86_fill_host(x86_cpu_def);
    } else {
        memcpy(x86_cpu_def, def, sizeof(*def));
    }

    /* Always advertise that the guest runs under a hypervisor. */
    add_flagname_to_bitmaps("hypervisor", &plus_features,
        &plus_ext_features, &plus_ext2_features, &plus_ext3_features);

    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
        } else if ((val = strchr(featurestr, '='))) {
            /* Split "prop=value" in place at the '='. */
            *val = 0; val++;
            if (!strcmp(featurestr, "family")) {
                char *err;
                /* NOTE(review): unlike model/stepping below, family has no
                 * upper-bound check; very large values would overflow the
                 * CPUID version encoding in cpu_x86_register() -- confirm. */
                family = strtol(val, &err, 10);
                if (!*val || *err || family < 0) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = family;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                model = strtol(val, &err, 10);
                if (!*val || *err || model < 0 || model > 0xff) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = model;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                stepping = strtol(val, &err, 10);
                if (!*val || *err || stepping < 0 || stepping > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = stepping;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                /* Pack the 12-character vendor string into the three
                 * registers, 4 bytes each, little-endian byte order. */
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
                for(i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i    ]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
                x86_cpu_def->vendor_override = 1;
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    /* Apply all '+' flags first, then mask out the '-' flags, so '-'
     * wins when the same flag appears in both. */
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    free(s);
    return 0;

error:
    free(s);
    return -1;
}
465

    
466
void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
467
{
468
    unsigned int i;
469

    
470
    for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
471
        (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
472
}
473

    
474
/* Resolve cpu_model and copy the resulting definition into the CPUID
 * fields of env.  Returns 0 on success, -1 if the model string could not
 * be parsed. */
static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    /* A zero vendor1 means the model did not specify a vendor; default
     * to the Intel vendor string. */
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_vendor_override = def->vendor_override;
    env->cpuid_level = def->level;
    /* Encode CPUID.1:EAX: families above 0x0f use base family 0xf plus
     * the extended-family field (bits 20..27). */
    if (def->family > 0x0f)
        env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
    else
        env->cpuid_version = def->family << 8;
    /* Model: low nibble in bits 4..7, high nibble in the extended-model
     * field (bits 16..19); stepping in bits 0..3. */
    env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
    env->cpuid_version |= def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
    {
        /* Pack the brand string, NUL-padded to 48 bytes, into the twelve
         * 32-bit cpuid_model words (little-endian byte order).
         * NOTE(review): this ORs into cpuid_model, so it assumes the array
         * is zeroed beforehand (e.g. by cpu_reset) -- confirm callers. */
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
    return 0;
}
519

    
520
/* NOTE: must be called outside the CPU execute loop */
/* Put env back into the architectural power-on/reset state: real mode,
 * CS:IP = F000:FFF0, default control/debug registers and FPU/SSE state. */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    /* Zero the state up to (but not including) the breakpoint lists,
     * which survive a reset. */
    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    /* CR0 = 0x60000010: CD and NW set, ET set, protection/paging off. */
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS starts with base 0xffff0000 so that CS:IP points at the reset
     * vector just below 4GB; the other segments start flat at 0. */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    /* EDX holds the CPU signature after reset. */
    env->regs[R_EDX] = env->cpuid_version;

    /* Only the always-set bit 1 of EFLAGS is on after reset. */
    env->eflags = 0x2;

    /* FPU init */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;   /* all FP registers tagged empty */
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
}
591

    
592
/* Release the CPU state structure. */
void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}
596

    
597
/***********************************************************/
598
/* x86 debug */
599

    
600
/* Printable names for the lazy condition-code operations; indexed directly
 * by env->cc_op in cpu_dump_state(), so the order here must match the
 * CC_OP_* enumeration and must not be changed independently. */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
654

    
655
static void
656
cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
657
                       int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
658
                       const char *name, struct SegmentCache *sc)
659
{
660
#ifdef TARGET_X86_64
661
    if (env->hflags & HF_CS64_MASK) {
662
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
663
                    sc->selector, sc->base, sc->limit, sc->flags);
664
    } else
665
#endif
666
    {
667
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
668
                    (uint32_t)sc->base, sc->limit, sc->flags);
669
    }
670

    
671
    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
672
        goto done;
673

    
674
    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
675
    if (sc->flags & DESC_S_MASK) {
676
        if (sc->flags & DESC_CS_MASK) {
677
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
678
                           ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
679
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
680
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
681
        } else {
682
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
683
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
684
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
685
        }
686
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
687
    } else {
688
        static const char *sys_type_name[2][16] = {
689
            { /* 32 bit mode */
690
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
691
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
692
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
693
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
694
            },
695
            { /* 64 bit mode */
696
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
697
                "Reserved", "Reserved", "Reserved", "Reserved",
698
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
699
                "Reserved", "IntGate64", "TrapGate64"
700
            }
701
        };
702
        cpu_fprintf(f, sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
703
                                    [(sc->flags & DESC_TYPE_MASK)
704
                                     >> DESC_TYPE_SHIFT]);
705
    }
706
done:
707
    cpu_fprintf(f, "\n");
708
}
709

    
710
void cpu_dump_state(CPUState *env, FILE *f,
711
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
712
                    int flags)
713
{
714
    int eflags, i, nb;
715
    char cc_op_name[32];
716
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
717

    
718
    if (kvm_enabled())
719
        kvm_arch_get_registers(env);
720

    
721
    eflags = env->eflags;
722
#ifdef TARGET_X86_64
723
    if (env->hflags & HF_CS64_MASK) {
724
        cpu_fprintf(f,
725
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
726
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
727
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
728
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
729
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
730
                    env->regs[R_EAX],
731
                    env->regs[R_EBX],
732
                    env->regs[R_ECX],
733
                    env->regs[R_EDX],
734
                    env->regs[R_ESI],
735
                    env->regs[R_EDI],
736
                    env->regs[R_EBP],
737
                    env->regs[R_ESP],
738
                    env->regs[8],
739
                    env->regs[9],
740
                    env->regs[10],
741
                    env->regs[11],
742
                    env->regs[12],
743
                    env->regs[13],
744
                    env->regs[14],
745
                    env->regs[15],
746
                    env->eip, eflags,
747
                    eflags & DF_MASK ? 'D' : '-',
748
                    eflags & CC_O ? 'O' : '-',
749
                    eflags & CC_S ? 'S' : '-',
750
                    eflags & CC_Z ? 'Z' : '-',
751
                    eflags & CC_A ? 'A' : '-',
752
                    eflags & CC_P ? 'P' : '-',
753
                    eflags & CC_C ? 'C' : '-',
754
                    env->hflags & HF_CPL_MASK,
755
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
756
                    (int)(env->a20_mask >> 20) & 1,
757
                    (env->hflags >> HF_SMM_SHIFT) & 1,
758
                    env->halted);
759
    } else
760
#endif
761
    {
762
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
763
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
764
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
765
                    (uint32_t)env->regs[R_EAX],
766
                    (uint32_t)env->regs[R_EBX],
767
                    (uint32_t)env->regs[R_ECX],
768
                    (uint32_t)env->regs[R_EDX],
769
                    (uint32_t)env->regs[R_ESI],
770
                    (uint32_t)env->regs[R_EDI],
771
                    (uint32_t)env->regs[R_EBP],
772
                    (uint32_t)env->regs[R_ESP],
773
                    (uint32_t)env->eip, eflags,
774
                    eflags & DF_MASK ? 'D' : '-',
775
                    eflags & CC_O ? 'O' : '-',
776
                    eflags & CC_S ? 'S' : '-',
777
                    eflags & CC_Z ? 'Z' : '-',
778
                    eflags & CC_A ? 'A' : '-',
779
                    eflags & CC_P ? 'P' : '-',
780
                    eflags & CC_C ? 'C' : '-',
781
                    env->hflags & HF_CPL_MASK,
782
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
783
                    (int)(env->a20_mask >> 20) & 1,
784
                    (env->hflags >> HF_SMM_SHIFT) & 1,
785
                    env->halted);
786
    }
787

    
788
    for(i = 0; i < 6; i++) {
789
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
790
                               &env->segs[i]);
791
    }
792
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
793
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);
794

    
795
#ifdef TARGET_X86_64
796
    if (env->hflags & HF_LMA_MASK) {
797
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
798
                    env->gdt.base, env->gdt.limit);
799
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
800
                    env->idt.base, env->idt.limit);
801
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
802
                    (uint32_t)env->cr[0],
803
                    env->cr[2],
804
                    env->cr[3],
805
                    (uint32_t)env->cr[4]);
806
        for(i = 0; i < 4; i++)
807
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
808
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
809
                    env->dr[6], env->dr[7]);
810
    } else
811
#endif
812
    {
813
        cpu_fprintf(f, "GDT=     %08x %08x\n",
814
                    (uint32_t)env->gdt.base, env->gdt.limit);
815
        cpu_fprintf(f, "IDT=     %08x %08x\n",
816
                    (uint32_t)env->idt.base, env->idt.limit);
817
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
818
                    (uint32_t)env->cr[0],
819
                    (uint32_t)env->cr[2],
820
                    (uint32_t)env->cr[3],
821
                    (uint32_t)env->cr[4]);
822
        for(i = 0; i < 4; i++)
823
            cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]);
824
        cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->dr[7]);
825
    }
826
    if (flags & X86_DUMP_CCOP) {
827
        if ((unsigned)env->cc_op < CC_OP_NB)
828
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
829
        else
830
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
831
#ifdef TARGET_X86_64
832
        if (env->hflags & HF_CS64_MASK) {
833
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
834
                        env->cc_src, env->cc_dst,
835
                        cc_op_name);
836
        } else
837
#endif
838
        {
839
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
840
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
841
                        cc_op_name);
842
        }
843
    }
844
    if (flags & X86_DUMP_FPU) {
845
        int fptag;
846
        fptag = 0;
847
        for(i = 0; i < 8; i++) {
848
            fptag |= ((!env->fptags[i]) << i);
849
        }
850
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
851
                    env->fpuc,
852
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
853
                    env->fpstt,
854
                    fptag,
855
                    env->mxcsr);
856
        for(i=0;i<8;i++) {
857
#if defined(USE_X86LDOUBLE)
858
            union {
859
                long double d;
860
                struct {
861
                    uint64_t lower;
862
                    uint16_t upper;
863
                } l;
864
            } tmp;
865
            tmp.d = env->fpregs[i].d;
866
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
867
                        i, tmp.l.lower, tmp.l.upper);
868
#else
869
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
870
                        i, env->fpregs[i].mmx.q);
871
#endif
872
            if ((i & 1) == 1)
873
                cpu_fprintf(f, "\n");
874
            else
875
                cpu_fprintf(f, " ");
876
        }
877
        if (env->hflags & HF_CS64_MASK)
878
            nb = 16;
879
        else
880
            nb = 8;
881
        for(i=0;i<nb;i++) {
882
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
883
                        i,
884
                        env->xmm_regs[i].XMM_L(3),
885
                        env->xmm_regs[i].XMM_L(2),
886
                        env->xmm_regs[i].XMM_L(1),
887
                        env->xmm_regs[i].XMM_L(0));
888
            if ((i & 1) == 1)
889
                cpu_fprintf(f, "\n");
890
            else
891
                cpu_fprintf(f, " ");
892
        }
893
    }
894
}
895

    
896
/***********************************************************/
897
/* x86 mmu */
898
/* XXX: add PGE support */
899

    
900
/* Update the emulated A20 gate line.  a20_state is treated as a
   boolean: any non-zero value enables the A20 address line. */
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    int cur_state;

    a20_state = (a20_state != 0);
    cur_state = (env->a20_mask >> 20) & 1;
    if (a20_state == cur_state)
        return;

#if defined(DEBUG_MMU)
    printf("A20 update: a20=%d\n", a20_state);
#endif
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

    /* when a20 is changed, all the MMU mappings are invalid, so
       we must flush everything */
    tlb_flush(env, 1);
    env->a20_mask = (~0x100000) | (a20_state << 20);
}
917

    
918
/* Load a new value into CR0 and keep all derived CPU state in sync:
   the TLB, the long-mode (LMA) status and the hidden flags.  CR0.ET
   is forced to 1 before the value is stored. */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* a change of PG, WP or PE invalidates all cached translations */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags: shifting new_cr0 left by (HF_MP_SHIFT - 1)
       lines the CR0 MP/EM/TS bits up with their hflags counterparts,
       which are then selected by the mask */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
958

    
959
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
960
   the PDPT */
961
/* Load a new page-table base into CR3.  When paging is enabled the
   cached translations made through the old page tables must be
   dropped (global entries are kept: tlb_flush(env, 0)). */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (!(env->cr[0] & CR0_PG_MASK))
        return;

#if defined(DEBUG_MMU)
    printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
    tlb_flush(env, 0);
}
971

    
972
/* Load a new value into CR4, flushing the TLB whenever any of the
   paging-related bits (PGE, PAE, PSE) change. */
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
    uint32_t paging_bits = CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK;

#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 ^ env->cr[4]) & paging_bits) {
        tlb_flush(env, 1);
    }

    /* OSFXSR may only be set when the CPU advertises SSE */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    /* mirror the OSFXSR state into the hidden flags */
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}
991

    
992
#if defined(CONFIG_USER_ONLY)
993

    
994
/* User-mode emulation: there is no guest MMU, so every fault is
   reported back to the guest as a user-level page fault.  Always
   returns 1 so the caller raises #PF. */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT) | PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
1005

    
1006
/* User-mode emulation: no translation, virtual == physical. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return (target_phys_addr_t)addr;
}
1010

    
1011
#else
1012

    
1013
/* XXX: This value should match the one returned by CPUID
1014
 * and in exec.c */
1015
#if defined(CONFIG_KQEMU)
1016
#define PHYS_ADDR_MASK 0xfffff000LL
1017
#else
1018
# if defined(TARGET_X86_64)
1019
# define PHYS_ADDR_MASK 0xfffffff000LL
1020
# else
1021
# define PHYS_ADDR_MASK 0xffffff000LL
1022
# endif
1023
#endif
1024

    
1025
/* return value:
1026
   -1 = cannot handle fault
1027
   0  = nothing more to do
1028
   1  = generate PF fault
1029
   2  = soft MMU activation required for this block
1030
*/
1031
/* Walk the guest page tables for 'addr' and install the translation
   in the QEMU TLB, or set up the page-fault state on failure.
   is_write1: 0 = read, 1 = write, 2 = instruction fetch (code fetch
   sets the I/D bit in the error code below).  mmu_idx selects the
   privilege level of the access.
   Return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    /* fetches (is_write1 == 2) are treated as reads for the W bit */
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: identity mapping with full permissions */
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            /* long mode: 4-level walk (PML4 -> PDPT -> PD -> PT) */
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension: non-canonical
               addresses raise #GP, not #PF */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* NX set while EFER.NXE is clear is a reserved-bit fault */
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                /* NOTE(review): only the low 32 bits of the 64-bit
                   entry are written back here; the A/D bits live in
                   the low word, so this works for the A/D update */
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            /* ptep accumulates the combined protections of each
               level; NX is kept inverted (XOR) so that '&' below
               implements "executable only if every level allows it" */
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* legacy PAE: 3-level walk starting at the 4-entry PDPT */
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* legacy PDPTEs carry no U/W/NX protection: start from
               the most permissive combination */
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            /* undo the inversion: from here on ptep's NX bit has its
               real polarity */
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                /* supervisor writes honour R/W only when CR0.WP=1 */
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        /* classic 32-bit 2-level paging */
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    /* build the #PF error code: W, U and (for NX faults on fetches)
       the I/D bit */
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
1320

    
1321
/* Translate a guest virtual address to a physical address for the
   debugger (gdbstub / monitor).  Performs a read-only page-table
   walk: no permission checks, no A/D bit updates, no faults raised.
   Returns -1 when the address is not mapped. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            /* long mode: 4-level walk */
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            /* legacy PAE: 4-entry PDPT selected by address bits 31:30 */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            /* paging disabled: identity mapping */
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page directory entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
1414

    
1415
/* Install the guest hardware debug register DRn (n == index) as a
   QEMU breakpoint or watchpoint, according to its type field in DR7. */
void hw_breakpoint_insert(CPUState *env, int index)
{
    int type, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        /* instruction breakpoint on the address in DRn */
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        /* data-write breakpoint -> write watchpoint */
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
         /* No support for I/O watchpoints yet */
        break;
    case 3:
        /* data read/write breakpoint -> access watchpoint */
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    /* on failure, clear the slot so hw_breakpoint_remove() skips it */
    if (err)
        env->cpu_breakpoint[index] = NULL;
}
1442

    
1443
/* Remove the QEMU breakpoint/watchpoint previously installed for the
   guest debug register DRn (n == index).  Does nothing if no object
   is registered for this slot. */
void hw_breakpoint_remove(CPUState *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        /* instruction breakpoint */
        if (hw_breakpoint_enabled(env->dr[7], index))
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        break;
    case 1:
    case 3:
        /* data watchpoint (write or read/write) */
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    default:
        /* case 2: no support for I/O watchpoints yet */
        break;
    }
}
1461

    
1462
/* Recompute the DR6 hit bits for the four debug registers and report
   whether any *enabled* breakpoint matched.  DR6 is only written back
   when an enabled breakpoint hit, or when force_dr6_update is set. */
int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong dr6 = env->dr[6] & ~0xf;
    int hit_enabled = 0;
    int reg;

    for (reg = 0; reg < 4; reg++) {
        int type = hw_breakpoint_type(env->dr[7], reg);
        int hit = 0;

        if (type == 0) {
            /* instruction breakpoint: matches the current EIP */
            hit = (env->dr[reg] == env->eip);
        } else if (type & 1) {
            /* data breakpoint (type 1 or 3): matched watchpoint */
            hit = env->cpu_watchpoint[reg] &&
                  (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT);
        }
        if (hit) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg))
                hit_enabled = 1;
        }
    }

    if (hit_enabled || force_dr6_update)
        env->dr[6] = dr6;
    return hit_enabled;
}
1483

    
1484
static CPUDebugExcpHandler *prev_debug_excp_handler;
1485

    
1486
void raise_exception(int exception_index);
1487

    
1488
/* Debug-exception hook: decides whether a triggered QEMU
   breakpoint/watchpoint belongs to the guest (BP_CPU) and, if so,
   forwards it as a #DB exception; otherwise chains to the previous
   handler (e.g. the gdbstub). */
static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        /* a data watchpoint fired */
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception(EXCP01_DB);
            else
                /* not an enabled guest breakpoint: restart the
                   faulting instruction transparently */
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        /* an instruction breakpoint fired: find the one at EIP */
        TAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception(EXCP01_DB);
                }
                break;
            }
    }
    /* chain to the previously installed debug exception handler */
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}
1513
#endif /* !CONFIG_USER_ONLY */
1514

    
1515
/* Execute the CPUID instruction on the *host* CPU with the given
   leaf/subleaf and return the raw register values.  Any of the output
   pointers may be NULL.  Only compiled to real work under KVM; without
   CONFIG_KVM the outputs are left untouched. */
static void host_cpuid(uint32_t function, uint32_t count,
                       uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#else
    /* 32-bit: store results through memory and save/restore all GPRs
       with pusha/popa, presumably to keep EBX intact for PIC builds
       — TODO confirm */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
#endif
}
1549

    
1550
/* Emulate the CPUID instruction for the guest: fill eax/ebx/ecx/edx
   for the requested leaf (index) and subleaf (count) from the CPU
   model's configuration, occasionally consulting the host CPU when
   running under KVM. */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* vendor string and highest basic leaf */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;

        /* sysenter isn't supported on compatibility mode on AMD.  and syscall
         * isn't supported in compatibility mode on Intel.  so advertise the
         * actual cpu, and say goodbye to migration between different vendors
         * if you use compatibility mode. */
        if (kvm_enabled() && !env->cpuid_vendor_override)
            host_cpuid(0, 0, NULL, ebx, ecx, edx);
        break;
    case 1:
        /* family/model/stepping and feature flags */
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        *eax = 1;
        *ebx = 0;
        *ecx = 0;
        *edx = 0x2c307d;
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        switch (count) {
            case 0: /* L1 dcache info */
                *eax = 0x0000121;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 1: /* L1 icache info */
                *eax = 0x0000122;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 2: /* L2 cache info */
                *eax = 0x0000143;
                *ebx = 0x3c0003f;
                *ecx = 0x0000fff;
                *edx = 0x0000001;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        /* highest extended leaf, plus vendor string again */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        /* extended feature flags */
        *eax = env->cpuid_features;
        *ebx = 0;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        if (kvm_enabled()) {
            uint32_t h_eax, h_edx;

            host_cpuid(index, 0, &h_eax, NULL, NULL, &h_edx);

            /* disable CPU features that the host does not support */

            /* long mode */
            if ((h_edx & 0x20000000) == 0 /* || !lm_capable_kernel */)
                *edx &= ~0x20000000;
            /* syscall */
            if ((h_edx & 0x00000800) == 0)
                *edx &= ~0x00000800;
            /* nx */
            if ((h_edx & 0x00100000) == 0)
                *edx &= ~0x00100000;

            /* disable CPU features that KVM cannot support */

            /* svm */
            *ecx &= ~4UL;
            /* 3dnow */
            *edx &= ~0xc0000000;
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        *eax = 0x01ff01ff;
        *ebx = 0x01ff01ff;
        *ecx = 0x40020140;
        *edx = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        *eax = 0;
        *ebx = 0x42004200;
        *ecx = 0x02008140;
        *edx = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
/* XXX: This value must match the one used in the MMU code. */ 
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
#if defined(CONFIG_KQEMU)
            *eax = 0x00003020;        /* 48 bits virtual, 32 bits physical */
#else
/* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028;        /* 48 bits virtual, 40 bits physical */
#endif
        } else {
#if defined(CONFIG_KQEMU)
            *eax = 0x00000020;        /* 32 bits physical */
#else
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
#endif
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000000A:
        /* SVM information leaf */
        *eax = 0x00000001; /* SVM Revision */
        *ebx = 0x00000010; /* nr of ASIDs */
        *ecx = 0;
        *edx = 0; /* optional features */
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
1746

    
1747
CPUX86State *cpu_x86_init(const char *cpu_model)
1748
{
1749
    CPUX86State *env;
1750
    static int inited;
1751

    
1752
    env = qemu_mallocz(sizeof(CPUX86State));
1753
    cpu_exec_init(env);
1754
    env->cpu_model_str = cpu_model;
1755

    
1756
    /* init various static tables */
1757
    if (!inited) {
1758
        inited = 1;
1759
        optimize_flags_init();
1760
#ifndef CONFIG_USER_ONLY
1761
        prev_debug_excp_handler =
1762
            cpu_set_debug_excp_handler(breakpoint_handler);
1763
#endif
1764
    }
1765
    if (cpu_x86_register(env, cpu_model) < 0) {
1766
        cpu_x86_close(env);
1767
        return NULL;
1768
    }
1769
    cpu_reset(env);
1770
#ifdef CONFIG_KQEMU
1771
    kqemu_init(env);
1772
#endif
1773

    
1774
    qemu_init_vcpu(env);
1775

    
1776
    if (kvm_enabled()) {
1777
        kvm_trim_features(&env->cpuid_features,
1778
                          kvm_arch_get_supported_cpuid(env, 1, R_EDX),
1779
                          feature_name);
1780
        kvm_trim_features(&env->cpuid_ext_features,
1781
                          kvm_arch_get_supported_cpuid(env, 1, R_ECX),
1782
                          ext_feature_name);
1783
        kvm_trim_features(&env->cpuid_ext2_features,
1784
                          kvm_arch_get_supported_cpuid(env, 0x80000001, R_EDX),
1785
                          ext2_feature_name);
1786
        kvm_trim_features(&env->cpuid_ext3_features,
1787
                          kvm_arch_get_supported_cpuid(env, 0x80000001, R_ECX),
1788
                          ext3_feature_name);
1789
    }
1790

    
1791
    return env;
1792
}
1793

    
1794
#if !defined(CONFIG_USER_ONLY)
/* Respond to an INIT IPI: fully reset the CPU, deliberately keeping only
 * a pending SIPI bit in interrupt_request so that a startup IPI sent
 * immediately after the INIT is not lost, then reset the local APIC. */
void do_cpu_init(CPUState *env)
{
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    cpu_reset(env);
    env->interrupt_request = sipi;
    apic_init_reset(env);
}

/* Respond to a startup IPI (SIPI): handled entirely by the local APIC. */
void do_cpu_sipi(CPUState *env)
{
    apic_sipi(env);
}
#else
/* User-mode emulation has no APIC, so INIT/SIPI are no-ops. */
void do_cpu_init(CPUState *env)
{
}
void do_cpu_sipi(CPUState *env)
{
}
#endif