Statistics
| Branch: | Revision:

root / target-i386 / helper.c @ d1cd4bf4

History | View | Annotate | Download (62.7 kB)

1
/*
2
 *  i386 helpers (without register variable usage)
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include <stdarg.h>
20
#include <stdlib.h>
21
#include <stdio.h>
22
#include <string.h>
23
#include <inttypes.h>
24
#include <signal.h>
25

    
26
#include "cpu.h"
27
#include "exec-all.h"
28
#include "qemu-common.h"
29
#include "kvm.h"
30

    
31
//#define DEBUG_MMU
32

    
33
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
 * about feature names, the Linux name is used. */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
static const char *ext_feature_name[] = {
    "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
    "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
    NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, "hypervisor",
};
static const char *ext2_feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
    "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
};
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};

/* If flagname names bit i of name_table, set that bit in *bits and
 * return 1; return 0 when the table has no such flag.  NULL entries
 * mark bit positions without a recognized name. */
static int set_feature_bit(uint32_t *bits, const char *flagname,
                           const char **name_table)
{
    int i;

    for (i = 0; i < 32; i++) {
        if (name_table[i] && !strcmp(flagname, name_table[i])) {
            /* 1u: shifting a signed 1 by 31 would overflow an int */
            *bits |= 1u << i;
            return 1;
        }
    }
    return 0;
}

/* Set the bit for flagname in every feature word whose name table
 * contains it (some names, e.g. "fpu", legitimately appear in more
 * than one CPUID word).  Unknown names are reported on stderr but are
 * otherwise ignored. */
static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    int found = 0;

    found |= set_feature_bit(features, flagname, feature_name);
    found |= set_feature_bit(ext_features, flagname, ext_feature_name);
    found |= set_feature_bit(ext2_features, flagname, ext2_feature_name);
    found |= set_feature_bit(ext3_features, flagname, ext3_feature_name);
    if (!found) {
        fprintf(stderr, "CPU feature %s not found\n", flagname);
    }
}
93

    
94
/* Static description of one CPU model: everything needed to populate
 * the CPUID-related fields of CPUX86State in cpu_x86_register(). */
typedef struct x86_def_t {
    const char *name;       /* model name as given to -cpu */
    uint32_t level;         /* maximum basic CPUID leaf */
    /* CPUID vendor string, packed as leaf 0 returns it: EBX, EDX, ECX
     * (order grounded in cpu_x86_fill_host below). */
    uint32_t vendor1, vendor2, vendor3;
    int family;
    int model;
    int stepping;
    /* Feature words for CPUID leaf 1 EDX/ECX and leaf 0x80000001 EDX/ECX. */
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;        /* maximum extended CPUID leaf (0x8000xxxx) */
    char model_id[48];      /* model string, NUL padded to 48 bytes */
    /* Set when the user supplied an explicit vendor= string;
     * consumed via env->cpuid_vendor_override — exact downstream
     * semantics not visible in this file. */
    int vendor_override;
} x86_def_t;
106

    
107
/* CPUID feature-set building blocks for the model table below.
 * The generation-based sets (486 -> pentium -> pentium2 -> pentium3)
 * are cumulative; PPRO_FEATURES is an independent baseline used by
 * the qemu32/qemu64-style models. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
118
/* Table of built-in CPU models selectable with -cpu.  Looked up by name
 * in cpu_x86_find_by_name(); the 64-bit-capable models are compiled in
 * only when TARGET_X86_64 is defined. */
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 4,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        /* 0x0183F3FF masks PPRO_FEATURES down to the bits that are also
         * valid in the 0x80000001 EDX word. */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "phenom",
        .level = 5,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 16,
        .model = 2,
        .stepping = 3,
        /* Missing: CPUID_VME, CPUID_HT */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* Missing: CPUID_EXT_CX16, CPUID_EXT_POPCNT */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
    },
    {
        .name = "core2duo",
        .level = 10,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* The original CPU also implements these features:
               CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* The original CPU also implements these ext features:
               CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
               CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU     T7700  @ 2.40GHz",
    },
    {
        .name = "kvm64",
        .level = 5,
        .vendor1 = CPUID_VENDOR_INTEL_1,
        .vendor2 = CPUID_VENDOR_INTEL_2,
        .vendor3 = CPUID_VENDOR_INTEL_3,
        .family = 15,
        .model = 6,
        .stepping = 1,
        /* Missing: CPUID_VME, CPUID_HT */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
        .ext3_features = 0,
        .xlevel = 0x80000008,
        .model_id = "Common KVM processor"
    },
#endif
    {
        .name = "qemu32",
        .level = 4,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3,
        .xlevel = 0,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "coreduo",
        .level = 10,
        .family = 6,
        .model = 14,
        .stepping = 8,
        /* The original CPU also implements these features:
               CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
        /* The original CPU also implements these ext features:
               CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
               CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .ext2_features = CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU           T2600  @ 2.16GHz",
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        /* XXX: put another string ? */
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "n270",
        /* original is on level 10 */
        .level = 5,
        .family = 6,
        .model = 28,
        .stepping = 2,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
            /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
             * CPUID_HT | CPUID_TM | CPUID_PBE */
            /* Some CPUs got no CPUID_SEP */
        .ext_features = CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
            /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
             * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270   @ 1.60GHz",
    },
};
320

    
321
static void host_cpuid(uint32_t function, uint32_t count, uint32_t *eax,
322
                               uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
323

    
324
/* Fill str (at least 48 bytes) with the host CPU's model string.
 * CPUID leaves 0x80000002..0x80000004 each return 16 bytes of it in
 * EAX/EBX/ECX/EDX.  Always returns 0. */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4] = { 0, 0, 0, 0 };
    int leaf;

    for (leaf = 0; leaf < 3; leaf++) {
        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        /* The four registers are contiguous in regs[], so one copy
           writes the same 16 bytes as four 4-byte copies would. */
        memcpy(str + leaf * 16, regs, sizeof(regs));
    }
    return 0;
}
338

    
339
/* Build an x86_def_t that mirrors the host CPU by querying the host's
 * CPUID (used for "-cpu host" when KVM is enabled).  Always returns 0. */
static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    x86_cpu_def->name = "host";
    /* Leaf 0: maximum basic leaf plus vendor string (EBX, EDX, ECX). */
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->level = eax;
    x86_cpu_def->vendor1 = ebx;
    x86_cpu_def->vendor2 = edx;
    x86_cpu_def->vendor3 = ecx;

    /* Leaf 1: version info (EAX) and feature words (ECX/EDX).
       NOTE(review): extended family/model are folded in unconditionally
       here rather than only for base family 0x6/0xf — confirm this
       matches the intended CPUID decoding. */
    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    x86_cpu_def->stepping = eax & 0x0F;
    x86_cpu_def->ext_features = ecx;
    x86_cpu_def->features = edx;

    /* Leaf 0x80000000: maximum extended leaf. */
    host_cpuid(0x80000000, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->xlevel = eax;

    /* Leaf 0x80000001: extended feature words. */
    host_cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->ext2_features = edx;
    x86_cpu_def->ext3_features = ecx;
    cpu_x86_fill_model_id(x86_cpu_def->model_id);
    x86_cpu_def->vendor_override = 0;

    return 0;
}
368

    
369
/* Parse a "-cpu" specification of the form
 *     name[,+flag][,-flag][,prop=value]...
 * into *x86_cpu_def.  The special name "host" (with KVM enabled)
 * snapshots the host CPU via cpu_x86_fill_host().  Returns 0 on
 * success, -1 on an unknown model or malformed feature string. */
static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    /* Work on a private copy: strtok() modifies the string in place. */
    char *s = strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
    uint32_t numvalue;

    /* Look the base model up in the built-in table. */
    def = NULL;
    for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
        if (strcmp(name, x86_defs[i].name) == 0) {
            def = &x86_defs[i];
            break;
        }
    }
    if (kvm_enabled() && strcmp(name, "host") == 0) {
        cpu_x86_fill_host(x86_cpu_def);
    } else if (!def) {
        goto error;
    } else {
        memcpy(x86_cpu_def, def, sizeof(*def));
    }

    /* The "hypervisor" bit is always advertised to the guest. */
    add_flagname_to_bitmaps("hypervisor", &plus_features,
        &plus_ext_features, &plus_ext2_features, &plus_ext3_features);

    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            /* "+flag": request a feature to be turned on. */
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
        } else if (featurestr[0] == '-') {
            /* "-flag": request a feature to be turned off. */
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
        } else if ((val = strchr(featurestr, '='))) {
            /* "prop=value": split at '=' and dispatch on the key. */
            *val = 0; val++;
            if (!strcmp(featurestr, "family")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = numvalue;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err || numvalue > 0xff) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = numvalue;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err || numvalue > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = numvalue;
            } else if (!strcmp(featurestr, "level")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->level = numvalue;
            } else if (!strcmp(featurestr, "xlevel")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                /* Extended levels live at 0x8000xxxx; accept the short
                   form and relocate it. */
                if (numvalue < 0x80000000) {
                        numvalue += 0x80000000;
                }
                x86_cpu_def->xlevel = numvalue;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                /* Pack the 12-character vendor string into the three
                   CPUID registers, four bytes each, low byte first. */
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
                for(i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i    ]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
                x86_cpu_def->vendor_override = 1;
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    /* '+' flags are ORed in before '-' flags are masked out, so an
       explicit '-' always wins regardless of command-line order. */
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    free(s);
    return 0;

error:
    free(s);
    return -1;
}
493

    
494
void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
495
{
496
    unsigned int i;
497

    
498
    for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
499
        (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
500
}
501

    
502
/* Resolve cpu_model (name plus optional feature tweaks) and copy the
 * resulting definition into the CPUID fields of env.
 * Returns 0 on success, -1 if the model string cannot be parsed. */
static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
    x86_def_t cpudef;

    if (cpu_x86_find_by_name(&cpudef, cpu_model) < 0)
        return -1;

    if (cpudef.vendor1) {
        env->cpuid_vendor1 = cpudef.vendor1;
        env->cpuid_vendor2 = cpudef.vendor2;
        env->cpuid_vendor3 = cpudef.vendor3;
    } else {
        /* Models without an explicit vendor report the Intel one. */
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_vendor_override = cpudef.vendor_override;
    env->cpuid_level = cpudef.level;

    /* Pack family/model/stepping into the CPUID leaf 1 EAX layout;
       families above 0x0f spill into the extended-family field. */
    if (cpudef.family > 0x0f)
        env->cpuid_version = 0xf00 | ((cpudef.family - 0x0f) << 20);
    else
        env->cpuid_version = cpudef.family << 8;
    env->cpuid_version |= ((cpudef.model & 0xf) << 4) | ((cpudef.model >> 4) << 16);
    env->cpuid_version |= cpudef.stepping;

    env->cpuid_features = cpudef.features;
    env->pat = 0x0007040600070406ULL;
    env->cpuid_ext_features = cpudef.ext_features;
    env->cpuid_ext2_features = cpudef.ext2_features;
    env->cpuid_xlevel = cpudef.xlevel;
    env->cpuid_ext3_features = cpudef.ext3_features;

    {
        /* Encode the model string into cpuid_model[]: four characters
           per 32-bit word, low byte first, NUL padded to 48 bytes. */
        const char *model_id = cpudef.model_id;
        int len, pos;

        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for (pos = 0; pos < 48; pos++) {
            int ch = (pos < len) ? (uint8_t)model_id[pos] : '\0';
            env->cpuid_model[pos >> 2] |= ch << (8 * (pos & 3));
        }
    }
    return 0;
}
547

    
548
/* NOTE: must be called outside the CPU execute loop */
/* Bring the CPU to its power-on/reset state.  CPUID configuration is
 * preserved (it lives before the memset boundary is re-filled below is
 * not touched: only fields up to 'breakpoints' are cleared). */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    /* Zero everything up to (but excluding) the 'breakpoints' field;
       fields at and after it are preserved across reset. */
    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    /* Power-on CR0 value: caching disabled, protection/paging off. */
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* Real-mode segments; CS starts at f000:fff0, the reset vector. */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;  /* bit 1 of EFLAGS is always set */

    /* FPU init */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;     /* all x87 registers tagged empty */
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;        /* default MXCSR: all exceptions masked */

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
}
619

    
620
/* Release a CPU state structure; env must not be used afterwards. */
void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}
624

    
625
/***********************************************************/
626
/* x86 debug */
627

    
628
/* Printable names for the lazy condition-code states, used by the
 * register dump code.  Presumably indexed by the CC_OP_* enum and
 * required to stay in that order — verify against cpu.h. */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
682

    
683
static void
684
cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
685
                       int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
686
                       const char *name, struct SegmentCache *sc)
687
{
688
#ifdef TARGET_X86_64
689
    if (env->hflags & HF_CS64_MASK) {
690
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
691
                    sc->selector, sc->base, sc->limit, sc->flags);
692
    } else
693
#endif
694
    {
695
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
696
                    (uint32_t)sc->base, sc->limit, sc->flags);
697
    }
698

    
699
    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
700
        goto done;
701

    
702
    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
703
    if (sc->flags & DESC_S_MASK) {
704
        if (sc->flags & DESC_CS_MASK) {
705
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
706
                           ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
707
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
708
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
709
        } else {
710
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
711
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
712
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
713
        }
714
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
715
    } else {
716
        static const char *sys_type_name[2][16] = {
717
            { /* 32 bit mode */
718
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
719
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
720
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
721
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
722
            },
723
            { /* 64 bit mode */
724
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
725
                "Reserved", "Reserved", "Reserved", "Reserved",
726
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
727
                "Reserved", "IntGate64", "TrapGate64"
728
            }
729
        };
730
        cpu_fprintf(f, sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
731
                                    [(sc->flags & DESC_TYPE_MASK)
732
                                     >> DESC_TYPE_SHIFT]);
733
    }
734
done:
735
    cpu_fprintf(f, "\n");
736
}
737

    
738
void cpu_dump_state(CPUState *env, FILE *f,
739
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
740
                    int flags)
741
{
742
    int eflags, i, nb;
743
    char cc_op_name[32];
744
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
745

    
746
    if (kvm_enabled())
747
        kvm_arch_get_registers(env);
748

    
749
    eflags = env->eflags;
750
#ifdef TARGET_X86_64
751
    if (env->hflags & HF_CS64_MASK) {
752
        cpu_fprintf(f,
753
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
754
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
755
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
756
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
757
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
758
                    env->regs[R_EAX],
759
                    env->regs[R_EBX],
760
                    env->regs[R_ECX],
761
                    env->regs[R_EDX],
762
                    env->regs[R_ESI],
763
                    env->regs[R_EDI],
764
                    env->regs[R_EBP],
765
                    env->regs[R_ESP],
766
                    env->regs[8],
767
                    env->regs[9],
768
                    env->regs[10],
769
                    env->regs[11],
770
                    env->regs[12],
771
                    env->regs[13],
772
                    env->regs[14],
773
                    env->regs[15],
774
                    env->eip, eflags,
775
                    eflags & DF_MASK ? 'D' : '-',
776
                    eflags & CC_O ? 'O' : '-',
777
                    eflags & CC_S ? 'S' : '-',
778
                    eflags & CC_Z ? 'Z' : '-',
779
                    eflags & CC_A ? 'A' : '-',
780
                    eflags & CC_P ? 'P' : '-',
781
                    eflags & CC_C ? 'C' : '-',
782
                    env->hflags & HF_CPL_MASK,
783
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
784
                    (int)(env->a20_mask >> 20) & 1,
785
                    (env->hflags >> HF_SMM_SHIFT) & 1,
786
                    env->halted);
787
    } else
788
#endif
789
    {
790
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
791
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
792
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
793
                    (uint32_t)env->regs[R_EAX],
794
                    (uint32_t)env->regs[R_EBX],
795
                    (uint32_t)env->regs[R_ECX],
796
                    (uint32_t)env->regs[R_EDX],
797
                    (uint32_t)env->regs[R_ESI],
798
                    (uint32_t)env->regs[R_EDI],
799
                    (uint32_t)env->regs[R_EBP],
800
                    (uint32_t)env->regs[R_ESP],
801
                    (uint32_t)env->eip, eflags,
802
                    eflags & DF_MASK ? 'D' : '-',
803
                    eflags & CC_O ? 'O' : '-',
804
                    eflags & CC_S ? 'S' : '-',
805
                    eflags & CC_Z ? 'Z' : '-',
806
                    eflags & CC_A ? 'A' : '-',
807
                    eflags & CC_P ? 'P' : '-',
808
                    eflags & CC_C ? 'C' : '-',
809
                    env->hflags & HF_CPL_MASK,
810
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
811
                    (int)(env->a20_mask >> 20) & 1,
812
                    (env->hflags >> HF_SMM_SHIFT) & 1,
813
                    env->halted);
814
    }
815

    
816
    for(i = 0; i < 6; i++) {
817
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
818
                               &env->segs[i]);
819
    }
820
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
821
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);
822

    
823
#ifdef TARGET_X86_64
824
    if (env->hflags & HF_LMA_MASK) {
825
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
826
                    env->gdt.base, env->gdt.limit);
827
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
828
                    env->idt.base, env->idt.limit);
829
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
830
                    (uint32_t)env->cr[0],
831
                    env->cr[2],
832
                    env->cr[3],
833
                    (uint32_t)env->cr[4]);
834
        for(i = 0; i < 4; i++)
835
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
836
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
837
                    env->dr[6], env->dr[7]);
838
    } else
839
#endif
840
    {
841
        cpu_fprintf(f, "GDT=     %08x %08x\n",
842
                    (uint32_t)env->gdt.base, env->gdt.limit);
843
        cpu_fprintf(f, "IDT=     %08x %08x\n",
844
                    (uint32_t)env->idt.base, env->idt.limit);
845
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
846
                    (uint32_t)env->cr[0],
847
                    (uint32_t)env->cr[2],
848
                    (uint32_t)env->cr[3],
849
                    (uint32_t)env->cr[4]);
850
        for(i = 0; i < 4; i++)
851
            cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]);
852
        cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->dr[7]);
853
    }
854
    if (flags & X86_DUMP_CCOP) {
855
        if ((unsigned)env->cc_op < CC_OP_NB)
856
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
857
        else
858
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
859
#ifdef TARGET_X86_64
860
        if (env->hflags & HF_CS64_MASK) {
861
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
862
                        env->cc_src, env->cc_dst,
863
                        cc_op_name);
864
        } else
865
#endif
866
        {
867
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
868
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
869
                        cc_op_name);
870
        }
871
    }
872
    if (flags & X86_DUMP_FPU) {
873
        int fptag;
874
        fptag = 0;
875
        for(i = 0; i < 8; i++) {
876
            fptag |= ((!env->fptags[i]) << i);
877
        }
878
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
879
                    env->fpuc,
880
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
881
                    env->fpstt,
882
                    fptag,
883
                    env->mxcsr);
884
        for(i=0;i<8;i++) {
885
#if defined(USE_X86LDOUBLE)
886
            union {
887
                long double d;
888
                struct {
889
                    uint64_t lower;
890
                    uint16_t upper;
891
                } l;
892
            } tmp;
893
            tmp.d = env->fpregs[i].d;
894
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
895
                        i, tmp.l.lower, tmp.l.upper);
896
#else
897
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
898
                        i, env->fpregs[i].mmx.q);
899
#endif
900
            if ((i & 1) == 1)
901
                cpu_fprintf(f, "\n");
902
            else
903
                cpu_fprintf(f, " ");
904
        }
905
        if (env->hflags & HF_CS64_MASK)
906
            nb = 16;
907
        else
908
            nb = 8;
909
        for(i=0;i<nb;i++) {
910
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
911
                        i,
912
                        env->xmm_regs[i].XMM_L(3),
913
                        env->xmm_regs[i].XMM_L(2),
914
                        env->xmm_regs[i].XMM_L(1),
915
                        env->xmm_regs[i].XMM_L(0));
916
            if ((i & 1) == 1)
917
                cpu_fprintf(f, "\n");
918
            else
919
                cpu_fprintf(f, " ");
920
        }
921
    }
922
}
923

    
924
/***********************************************************/
925
/* x86 mmu */
926
/* XXX: add PGE support */
927

    
928
/* Update the state of the A20 gate line; a20_state is treated as a
   boolean.  A change invalidates every cached translation. */
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    int new_state = (a20_state != 0);

    /* Nothing to do when the gate already has the requested state. */
    if (new_state == ((env->a20_mask >> 20) & 1)) {
        return;
    }
#if defined(DEBUG_MMU)
    printf("A20 update: a20=%d\n", new_state);
#endif
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

    /* when a20 is changed, all the MMU mappings are invalid, so
       we must flush everything */
    tlb_flush(env, 1);
    env->a20_mask = (~0x100000) | (new_state << 20);
}
945

    
946
/* Install a new CR0 value: flush the TLB when paging-related bits change,
 * handle long-mode entry/exit, and recompute the PE/ADDSEG/FPU hidden
 * flags that the translator depends on. */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* PG, WP or PE changing alters address translation globally. */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    /* Enabling paging with EFER.LME set activates long mode (LMA);
       disabling paging while LMA is set leaves long mode. */
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    /* ET is hardwired to 1 on every CPU we emulate. */
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags: copies CR0.MP/EM/TS into the corresponding
       hflags bits in one shift-and-mask */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
986

    
987
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
988
   the PDPT */
989
/* Install a new CR3 (page-table base).  When paging is active the
   non-global cached translations become stale and must be flushed. */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* Paging disabled: no translations to invalidate. */
        return;
    }
#if defined(DEBUG_MMU)
    printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
    tlb_flush(env, 0);
}
999

    
1000
/* Install a new CR4 value: flush the TLB when translation-related bits
 * change, and keep the OSFXSR hidden flag in sync for SSE handling. */
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    /* Fix: log the value being installed; the previous code printed the
       stale env->cr[4], which is not updated until the end of this
       function, making the trace misleading. */
    printf("CR4 update: CR4=%08x\n", new_cr4);
#endif
    /* PGE, PAE or PSE changing alters address translation globally. */
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling: OSFXSR may only be set when the CPU reports SSE. */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}
1019

    
1020
#if defined(CONFIG_USER_ONLY)
1021

    
1022
/* User-mode-only emulation: there is no guest MMU, so every fault is
   reported back as a user-level page fault (always returns 1). */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* Only the write bit of the access type is relevant here. */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT) | PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
1033

    
1034
/* User-mode-only emulation: virtual and physical addresses coincide. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
1038

    
1039
#else
1040

    
1041
/* XXX: This value should match the one returned by CPUID
1042
 * and in exec.c */
1043
# if defined(TARGET_X86_64)
1044
# define PHYS_ADDR_MASK 0xfffffff000LL
1045
# else
1046
# define PHYS_ADDR_MASK 0xffffff000LL
1047
# endif
1048

    
1049
/* return value:
1050
   -1 = cannot handle fault
1051
   0  = nothing more to do
1052
   1  = generate PF fault
1053
   2  = soft MMU activation required for this block
1054
*/
1055
/* Walk the guest page tables for 'addr' and install the translation in
 * the TLB, setting accessed/dirty bits along the way.
 *
 * is_write1 encodes the access type: 0 = read, 1 = write, 2 = ifetch
 * (code fetch, checked against NX).  mmu_idx selects the privilege of
 * the access.  Three paging modes are handled: 4-level (long mode),
 * PAE (3-level), and legacy 2-level 32-bit paging.
 *
 * Return value:
 *   -1 = cannot handle fault
 *   0  = nothing more to do
 *   1  = generate PF fault
 *   2  = soft MMU activation required for this block
 */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    /* Collapse the access type to a plain write flag (ifetch reads). */
    is_write = is_write1 & 1;

    /* Paging disabled: identity mapping with full permissions. */
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                /* Non-canonical address: #GP, not #PF. */
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* NX set while EFER.NXE is clear is a reserved-bit fault. */
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            /* ptep accumulates the effective protection across levels;
               NX is tracked inverted (via XOR) so it can be ANDed like
               the positive USER/RW bits. */
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* PAE without long mode: 4-entry PDPT, entries grant
               everything (no permission bits at this level). */
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            /* Undo the inversion: from here on ptep's NX bit is real. */
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                /* Supervisor writes honor RW only when CR0.WP is set. */
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                /* NOTE(review): only the low 32 bits of the 64-bit entry
                   are written back here — TODO confirm this is the
                   intended behavior for A/D updates. */
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* Legacy 2-level 32-bit paging. */
        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    /* Build the #PF error code from the access attributes. */
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    /* Instruction-fetch bit only exists with NXE+PAE enabled. */
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
1344

    
1345
/* Debugger variant of the page walk: translate 'addr' to a physical
 * address without any permission checks and without touching the
 * accessed/dirty bits.  Returns -1 if the address is not mapped. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            /* PAE without long mode: 4-entry PDPT indexed by bits 31:30. */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            /* Paging disabled: identity mapping. */
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page directory entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
1438

    
1439
/* Register the hardware breakpoint described by DRindex/DR7 with the
 * generic breakpoint/watchpoint machinery.  DR7 type 0 is an execution
 * breakpoint, 1 a data-write watchpoint, 3 a data-access watchpoint;
 * type 2 (I/O) is not supported. */
void hw_breakpoint_insert(CPUState *env, int index)
{
    int type, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        /* Execution breakpoints are only registered while enabled in DR7. */
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
         /* No support for I/O watchpoints yet */
        break;
    case 3:
        type = BP_CPU | BP_MEM_ACCESS;
        /* fallthrough into the shared watchpoint-insert path */
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    /* On failure clear the slot so hw_breakpoint_remove() skips it.
       NOTE(review): this clears cpu_breakpoint even on the watchpoint
       path — presumably the two arrays alias the same storage; confirm
       in the CPUState declaration. */
    if (err)
        env->cpu_breakpoint[index] = NULL;
}
1466

    
1467
/* Unregister the hardware breakpoint in slot 'index', if any was
   successfully inserted by hw_breakpoint_insert(). */
void hw_breakpoint_remove(CPUState *env, int index)
{
    int type;

    /* Slot was never registered (or insertion failed). */
    if (env->cpu_breakpoint[index] == NULL) {
        return;
    }

    type = hw_breakpoint_type(env->dr[7], index);
    if (type == 0) {
        /* Execution breakpoint: only registered while enabled in DR7. */
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        }
    } else if (type == 1 || type == 3) {
        /* Data write / data access watchpoint. */
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
    }
    /* type == 2: no support for I/O watchpoints yet. */
}
1485

    
1486
/* Compute which debug registers have triggered and reflect that in
   DR6.  Returns non-zero when a DR7-enabled breakpoint has hit; DR6 is
   written when that is the case or when force_dr6_update is set. */
int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong new_dr6 = env->dr[6] & ~0xf;
    int hit_enabled = 0;
    int i;

    for (i = 0; i < 4; i++) {
        int bp_type = hw_breakpoint_type(env->dr[7], i);
        /* Execution breakpoint: DRi matches the current EIP. */
        int exec_hit = (bp_type == 0 && env->dr[i] == env->eip);
        /* Data watchpoint (types 1 and 3): flagged by the generic
           watchpoint machinery. */
        int data_hit = ((bp_type & 1) && env->cpu_watchpoint[i] &&
                        (env->cpu_watchpoint[i]->flags & BP_WATCHPOINT_HIT));

        if (exec_hit || data_hit) {
            new_dr6 |= 1 << i;
            if (hw_breakpoint_enabled(env->dr[7], i)) {
                hit_enabled = 1;
            }
        }
    }

    if (hit_enabled || force_dr6_update) {
        env->dr[6] = new_dr6;
    }
    return hit_enabled;
}
1507

    
1508
static CPUDebugExcpHandler *prev_debug_excp_handler;
1509

    
1510
void raise_exception(int exception_index);
1511

    
1512
/* Debug-exception hook: decide whether a watchpoint/breakpoint hit that
 * belongs to the guest's debug registers (BP_CPU) should raise #DB in
 * the guest, then chain to any previously installed handler. */
static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            /* Consume the hit before re-entering the guest. */
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception(EXCP01_DB);
            else
                /* Not an enabled guest breakpoint: restart execution
                   at the same point (does not return). */
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        /* Execution breakpoint: find the entry matching EIP and raise
           #DB only if it is a guest (BP_CPU) breakpoint. */
        TAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception(EXCP01_DB);
                }
                break;
            }
    }
    /* Chain to the handler we displaced (e.g. the gdbstub's). */
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}
1537

    
1538
/* This should come from sysemu.h - if we could include it here... */
1539
void qemu_system_reset_request(void);
1540

    
1541
/* Inject a machine-check event into bank 'bank' of 'cenv'.
 *
 * status/mcg_status/addr/misc are the values for the bank's MSRs.
 * Injection is silently dropped when the bank is out of range, the
 * status lacks the VAL bit, or uncorrected-error reporting is disabled
 * via MCG_CTL / MCi_CTL.  An uncorrected error while a previous MCE is
 * still in progress (MCIP set) or while CR4.MCE is clear triggers a
 * system reset (triple-fault behavior). */
void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc)
{
    uint64_t mcg_cap = cenv->mcg_cap;
    unsigned bank_num = mcg_cap & 0xff;  /* MCG_CAP[7:0] = bank count */
    uint64_t *banks = cenv->mce_banks;

    if (bank >= bank_num || !(status & MCI_STATUS_VAL))
        return;

    /*
     * if MSR_MCG_CTL is not all 1s, the uncorrected error
     * reporting is disabled
     */
    if ((status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
        cenv->mcg_ctl != ~(uint64_t)0)
        return;
    /* Each bank occupies 4 slots: CTL, STATUS, ADDR, MISC. */
    banks += 4 * bank;
    /*
     * if MSR_MCi_CTL is not all 1s, the uncorrected error
     * reporting is disabled for the bank
     */
    if ((status & MCI_STATUS_UC) && banks[0] != ~(uint64_t)0)
        return;
    if (status & MCI_STATUS_UC) {
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            /* Nested MCE or MCE disabled: the real CPU shuts down. */
            fprintf(stderr, "injects mce exception while previous "
                    "one is in progress!\n");
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        /* Bank already holds a valid record: flag the overflow. */
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        cenv->mcg_status = mcg_status;
        banks[1] = status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        /* Corrected error: record it unless an uncorrected one is
           already pending in the bank. */
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        banks[1] = status;
    } else
        /* Corrected error colliding with a pending uncorrected one:
           only note the overflow. */
        banks[1] |= MCI_STATUS_OVER;
}
1591
#endif /* !CONFIG_USER_ONLY */
1592

    
1593
/* Initialize the machine-check architecture state for CPU models of
   family >= 6 that advertise both MCE and MCA in CPUID. */
static void mce_init(CPUX86State *cenv)
{
    unsigned int i, nbanks;

    if (((cenv->cpuid_version >> 8) & 0xf) < 6) {
        return;
    }
    if ((cenv->cpuid_features & (CPUID_MCE | CPUID_MCA)) !=
        (CPUID_MCE | CPUID_MCA)) {
        return;
    }

    cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
    cenv->mcg_ctl = ~(uint64_t)0;
    nbanks = cenv->mcg_cap & 0xff;
    /* Four 64-bit MSRs (CTL/STATUS/ADDR/MISC) per bank, zero-filled. */
    cenv->mce_banks = qemu_mallocz(nbanks * sizeof(uint64_t) * 4);
    for (i = 0; i < nbanks; i++) {
        /* MCi_CTL resets to all ones: every error type enabled. */
        cenv->mce_banks[i * 4] = ~(uint64_t)0;
    }
}
1607

    
1608
/* Execute the CPUID instruction on the *host* CPU for the given leaf
 * (function) and subleaf (count), storing the resulting registers into
 * any non-NULL output pointer.  Only compiled in when KVM support is
 * enabled; otherwise the outputs are left untouched. */
static void host_cpuid(uint32_t function, uint32_t count,
                       uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#else
    /* 32-bit: EBX may be the PIC register, so save/restore everything
       with pusha/popa and write the results through %esi instead of
       using register outputs. */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
#endif
}
/* Emulated CPUID: fill *eax/*ebx/*ecx/*edx with the values the guest
 * sees for leaf 'index' and (for leaves 4 and similar) sub-leaf 'count'.
 * A leaf beyond the advertised maximum is redirected to the highest
 * supported basic leaf, mirroring real-hardware behaviour. */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    /* test if maximum index reached; out-of-range leaves (both basic
     * and extended ranges) fall back to the top basic leaf */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* vendor string, in the EBX:EDX:ECX register order CPUID uses */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;

        /* sysenter isn't supported in compatibility mode on AMD, and
         * syscall isn't supported in compatibility mode on Intel, so
         * advertise the actual host CPU vendor under KVM -- and say
         * goodbye to migration between different vendors if you use
         * compatibility mode. */
        if (kvm_enabled() && !env->cpuid_vendor_override)
            host_cpuid(0, 0, NULL, ebx, ecx, edx);
        break;
    case 1:
        /* version/feature information */
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;
        if (env->nr_cores * env->nr_threads > 1) {
            /* logical processor count in EBX[23:16] */
            *ebx |= (env->nr_cores * env->nr_threads) << 16;
            *edx |= 1 << 28;    /* HTT bit */
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        *eax = 1;
        *ebx = 0;
        *ecx = 0;
        *edx = 0x2c307d;
        break;
    case 4:
        /* cache info: needed for Core compatibility.
         * EAX[31:26] = cores per package - 1 */
        if (env->nr_cores > 1) {
                *eax = (env->nr_cores - 1) << 26;
        } else {
                *eax = 0;
        }
        switch (count) {
            case 0: /* L1 dcache info */
                *eax |= 0x0000121;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 1: /* L1 icache info */
                *eax |= 0x0000122;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 2: /* L2 cache info */
                *eax |= 0x0000143;
                if (env->nr_threads > 1) {
                    /* EAX[25:14] = threads sharing this cache - 1 */
                    *eax |= (env->nr_threads - 1) << 14;
                }
                *ebx = 0x3c0003f;
                *ecx = 0x0000fff;
                *edx = 0x0000001;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        /* highest extended leaf + vendor string, like leaf 0 */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        /* extended version/feature information */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        /* AMD multi-core reporting: set CmpLegacy on AMD-branded
         * multi-thread configurations */
        if (env->nr_cores * env->nr_threads > 1 &&
            env->cpuid_vendor1 == CPUID_VENDOR_AMD_1 &&
            env->cpuid_vendor2 == CPUID_VENDOR_AMD_2 &&
            env->cpuid_vendor3 == CPUID_VENDOR_AMD_3) {
            *ecx |= 1 << 1;    /* CmpLegacy bit */
        }

        if (kvm_enabled()) {
            /* Nested SVM not yet supported in KVM */
            *ecx &= ~CPUID_EXT3_SVM;
        } else {
            /* AMD 3DNow! is not supported in QEMU */
            *edx &= ~(CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT);
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        *eax = 0x01ff01ff;
        *ebx = 0x01ff01ff;
        *ecx = 0x40020140;
        *edx = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        *eax = 0;
        *ebx = 0x42004200;
        *ecx = 0x02008140;
        *edx = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
/* XXX: This value must match the one used in the MMU code. */ 
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
/* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028;        /* 48 bits virtual, 40 bits physical */
        } else {
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (env->nr_cores * env->nr_threads > 1) {
            /* ECX[7:0] = number of logical processors - 1 */
            *ecx |= (env->nr_cores * env->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        *eax = 0x00000001; /* SVM Revision */
        *ebx = 0x00000010; /* nr of ASIDs */
        *ecx = 0;
        *edx = 0; /* optional features */
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
1839
                            target_ulong *base, unsigned int *limit,
1840
                            unsigned int *flags)
1841
{
1842
    SegmentCache *dt;
1843
    target_ulong ptr;
1844
    uint32_t e1, e2;
1845
    int index;
1846

    
1847
    if (selector & 0x4)
1848
        dt = &env->ldt;
1849
    else
1850
        dt = &env->gdt;
1851
    index = selector & ~7;
1852
    ptr = dt->base + index;
1853
    if ((index + 7) > dt->limit
1854
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
1855
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
1856
        return 0;
1857

    
1858
    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
1859
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
1860
    if (e2 & DESC_G_MASK)
1861
        *limit = (*limit << 12) | 0xfff;
1862
    *flags = e2;
1863

    
1864
    return 1;
1865
}
CPUX86State *cpu_x86_init(const char *cpu_model)
1868
{
1869
    CPUX86State *env;
1870
    static int inited;
1871

    
1872
    env = qemu_mallocz(sizeof(CPUX86State));
1873
    cpu_exec_init(env);
1874
    env->cpu_model_str = cpu_model;
1875

    
1876
    /* init various static tables */
1877
    if (!inited) {
1878
        inited = 1;
1879
        optimize_flags_init();
1880
#ifndef CONFIG_USER_ONLY
1881
        prev_debug_excp_handler =
1882
            cpu_set_debug_excp_handler(breakpoint_handler);
1883
#endif
1884
    }
1885
    if (cpu_x86_register(env, cpu_model) < 0) {
1886
        cpu_x86_close(env);
1887
        return NULL;
1888
    }
1889
    mce_init(env);
1890
    cpu_reset(env);
1891

    
1892
    qemu_init_vcpu(env);
1893

    
1894
    return env;
1895
}
#if !defined(CONFIG_USER_ONLY)
/* Respond to an INIT IPI: reset the CPU and its local APIC while
 * preserving any already-pending SIPI request, so a STARTUP IPI that
 * raced with the reset is not lost. */
void do_cpu_init(CPUState *env)
{
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    cpu_reset(env);
    env->interrupt_request = sipi;
    apic_init_reset(env);
}

/* Respond to a STARTUP IPI by forwarding it to the local APIC. */
void do_cpu_sipi(CPUState *env)
{
    apic_sipi(env);
}
#else
/* User-mode emulation has no APIC and no IPIs: these are no-ops. */
void do_cpu_init(CPUState *env)
{
}
void do_cpu_sipi(CPUState *env)
{
}
#endif