Statistics
| Branch: | Revision:

root / target-i386 / helper.c @ ac72472b

History | View | Annotate | Download (60.1 kB)

1
/*
2
 *  i386 helpers (without register variable usage)
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include <stdarg.h>
20
#include <stdlib.h>
21
#include <stdio.h>
22
#include <string.h>
23
#include <inttypes.h>
24
#include <signal.h>
25

    
26
#include "cpu.h"
27
#include "exec-all.h"
28
#include "qemu-common.h"
29
#include "kvm.h"
30

    
31
//#define DEBUG_MMU
32

    
33
/* feature flags taken from "Intel Processor Identification and the CPUID
34
 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
35
 * about feature names, the Linux name is used. */
36
/* CPUID leaf 1, EDX: basic feature flag names, bit 0..31.
 * NULL marks reserved bits.  Linux flag names are used where Intel and
 * AMD disagree (Intel's alias is noted inline). */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
42
/* CPUID leaf 1, ECX: extended feature flag names, bit 0..31. */
static const char *ext_feature_name[] = {
    "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
    "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
    NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, "hypervisor",
};
48
/* CPUID leaf 0x80000001, EDX: AMD extended feature flag names, bit 0..31. */
static const char *ext2_feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
    "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
};
54
/* CPUID leaf 0x80000001, ECX: AMD extended feature flag names, bit 0..31. */
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
60

    
61
static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
62
                                    uint32_t *ext_features,
63
                                    uint32_t *ext2_features,
64
                                    uint32_t *ext3_features)
65
{
66
    int i;
67
    int found = 0;
68

    
69
    for ( i = 0 ; i < 32 ; i++ )
70
        if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
71
            *features |= 1 << i;
72
            found = 1;
73
        }
74
    for ( i = 0 ; i < 32 ; i++ )
75
        if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
76
            *ext_features |= 1 << i;
77
            found = 1;
78
        }
79
    for ( i = 0 ; i < 32 ; i++ )
80
        if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
81
            *ext2_features |= 1 << i;
82
            found = 1;
83
        }
84
    for ( i = 0 ; i < 32 ; i++ )
85
        if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
86
            *ext3_features |= 1 << i;
87
            found = 1;
88
        }
89
    if (!found) {
90
        fprintf(stderr, "CPU feature %s not found\n", flagname);
91
    }
92
}
93

    
94
/* Static description of one CPU model selectable with -cpu. */
typedef struct x86_def_t {
    const char *name;           /* model name as given on the command line */
    uint32_t level;             /* maximum basic CPUID leaf */
    uint32_t vendor1, vendor2, vendor3; /* vendor id string, EBX/EDX/ECX order */
    int family;
    int model;
    int stepping;
    /* feature words: CPUID.1:EDX, CPUID.1:ECX, CPUID.80000001:EDX,
     * CPUID.80000001:ECX respectively */
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;            /* maximum extended CPUID leaf */
    char model_id[48];          /* brand string, 48 bytes incl. padding NULs */
    int vendor_override;        /* non-zero: user forced the vendor string */
} x86_def_t;
106

    
107
/* Baseline feature sets for the built-in CPU models below.  Each later
 * model builds on the previous one. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
118
static x86_def_t x86_defs[] = {
119
#ifdef TARGET_X86_64
120
    {
121
        .name = "qemu64",
122
        .level = 4,
123
        .vendor1 = CPUID_VENDOR_AMD_1,
124
        .vendor2 = CPUID_VENDOR_AMD_2,
125
        .vendor3 = CPUID_VENDOR_AMD_3,
126
        .family = 6,
127
        .model = 2,
128
        .stepping = 3,
129
        .features = PPRO_FEATURES | 
130
        /* these features are needed for Win64 and aren't fully implemented */
131
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
132
        /* this feature is needed for Solaris and isn't fully implemented */
133
            CPUID_PSE36,
134
        .ext_features = CPUID_EXT_SSE3,
135
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | 
136
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
137
        .ext3_features = CPUID_EXT3_SVM,
138
        .xlevel = 0x8000000A,
139
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
140
    },
141
    {
142
        .name = "phenom",
143
        .level = 5,
144
        .vendor1 = CPUID_VENDOR_AMD_1,
145
        .vendor2 = CPUID_VENDOR_AMD_2,
146
        .vendor3 = CPUID_VENDOR_AMD_3,
147
        .family = 16,
148
        .model = 2,
149
        .stepping = 3,
150
        /* Missing: CPUID_VME, CPUID_HT */
151
        .features = PPRO_FEATURES | 
152
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
153
            CPUID_PSE36,
154
        /* Missing: CPUID_EXT_CX16, CPUID_EXT_POPCNT */
155
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
156
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
157
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | 
158
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
159
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
160
            CPUID_EXT2_FFXSR,
161
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
162
                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
163
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
164
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
165
        .ext3_features = CPUID_EXT3_SVM,
166
        .xlevel = 0x8000001A,
167
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
168
    },
169
    {
170
        .name = "core2duo",
171
        .level = 10,
172
        .family = 6,
173
        .model = 15,
174
        .stepping = 11,
175
        /* The original CPU also implements these features:
176
               CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
177
               CPUID_TM, CPUID_PBE */
178
        .features = PPRO_FEATURES |
179
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
180
            CPUID_PSE36,
181
        /* The original CPU also implements these ext features:
182
               CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
183
               CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
184
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
185
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
186
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
187
        .xlevel = 0x80000008,
188
        .model_id = "Intel(R) Core(TM)2 Duo CPU     T7700  @ 2.40GHz",
189
    },
190
#endif
191
    {
192
        .name = "qemu32",
193
        .level = 4,
194
        .family = 6,
195
        .model = 3,
196
        .stepping = 3,
197
        .features = PPRO_FEATURES,
198
        .ext_features = CPUID_EXT_SSE3,
199
        .xlevel = 0,
200
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
201
    },
202
    {
203
        .name = "coreduo",
204
        .level = 10,
205
        .family = 6,
206
        .model = 14,
207
        .stepping = 8,
208
        /* The original CPU also implements these features:
209
               CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
210
               CPUID_TM, CPUID_PBE */
211
        .features = PPRO_FEATURES | CPUID_VME |
212
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
213
        /* The original CPU also implements these ext features:
214
               CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
215
               CPUID_EXT_PDCM */
216
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
217
        .ext2_features = CPUID_EXT2_NX,
218
        .xlevel = 0x80000008,
219
        .model_id = "Genuine Intel(R) CPU           T2600  @ 2.16GHz",
220
    },
221
    {
222
        .name = "486",
223
        .level = 0,
224
        .family = 4,
225
        .model = 0,
226
        .stepping = 0,
227
        .features = I486_FEATURES,
228
        .xlevel = 0,
229
    },
230
    {
231
        .name = "pentium",
232
        .level = 1,
233
        .family = 5,
234
        .model = 4,
235
        .stepping = 3,
236
        .features = PENTIUM_FEATURES,
237
        .xlevel = 0,
238
    },
239
    {
240
        .name = "pentium2",
241
        .level = 2,
242
        .family = 6,
243
        .model = 5,
244
        .stepping = 2,
245
        .features = PENTIUM2_FEATURES,
246
        .xlevel = 0,
247
    },
248
    {
249
        .name = "pentium3",
250
        .level = 2,
251
        .family = 6,
252
        .model = 7,
253
        .stepping = 3,
254
        .features = PENTIUM3_FEATURES,
255
        .xlevel = 0,
256
    },
257
    {
258
        .name = "athlon",
259
        .level = 2,
260
        .vendor1 = CPUID_VENDOR_AMD_1,
261
        .vendor2 = CPUID_VENDOR_AMD_2,
262
        .vendor3 = CPUID_VENDOR_AMD_3,
263
        .family = 6,
264
        .model = 2,
265
        .stepping = 3,
266
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
267
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
268
        .xlevel = 0x80000008,
269
        /* XXX: put another string ? */
270
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
271
    },
272
    {
273
        .name = "n270",
274
        /* original is on level 10 */
275
        .level = 5,
276
        .family = 6,
277
        .model = 28,
278
        .stepping = 2,
279
        .features = PPRO_FEATURES |
280
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
281
            /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
282
             * CPUID_HT | CPUID_TM | CPUID_PBE */
283
            /* Some CPUs got no CPUID_SEP */
284
        .ext_features = CPUID_EXT_MONITOR |
285
            CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
286
            /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
287
             * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
288
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
289
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
290
        .xlevel = 0x8000000A,
291
        .model_id = "Intel(R) Atom(TM) CPU N270   @ 1.60GHz",
292
    },
293
};
294

    
295
static void host_cpuid(uint32_t function, uint32_t count, uint32_t *eax,
296
                               uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
297

    
298
/* Read the host CPU's 48-byte brand string into @str via CPUID leaves
 * 0x80000002..0x80000004.  @str must have room for 48 bytes; each leaf
 * contributes 16 bytes in register order EAX, EBX, ECX, EDX.
 * Always returns 0. */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int i;

    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        memcpy(str + i * 16 +  0, &eax, 4);
        memcpy(str + i * 16 +  4, &ebx, 4);
        memcpy(str + i * 16 +  8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}
312

    
313
/* Populate @x86_cpu_def with the host CPU's identification as reported by
 * the CPUID instruction (used by "-cpu host" under KVM).
 * Always returns 0. */
static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    x86_cpu_def->name = "host";
    /* leaf 0: maximum basic leaf and vendor id (EBX/EDX/ECX order) */
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->level = eax;
    x86_cpu_def->vendor1 = ebx;
    x86_cpu_def->vendor2 = edx;
    x86_cpu_def->vendor3 = ecx;

    /* leaf 1: version info (family/model/stepping incl. extended fields)
     * and the two basic feature words */
    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    x86_cpu_def->stepping = eax & 0x0F;
    x86_cpu_def->ext_features = ecx;
    x86_cpu_def->features = edx;

    /* leaf 0x80000000: maximum extended leaf */
    host_cpuid(0x80000000, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->xlevel = eax;

    /* leaf 0x80000001: AMD extended feature words */
    host_cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->ext2_features = edx;
    x86_cpu_def->ext3_features = ecx;
    cpu_x86_fill_model_id(x86_cpu_def->model_id);
    x86_cpu_def->vendor_override = 0;

    return 0;
}
342

    
343
/* Parse a "-cpu" model string of the form
 *     name[,+flag][,-flag][,key=value]...
 * and fill in @x86_cpu_def.  "host" with KVM enabled copies the host CPU.
 * Recognized key=value settings: family, model, stepping, vendor (exactly
 * 12 chars), model_id.  "+flag"/"-flag" force a CPUID feature bit on/off.
 * Returns 0 on success, -1 on any parse error (diagnostic on stderr).
 * NOTE(review): uses strtok(), so this is not reentrant/thread-safe. */
static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    char *s = strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
    int family = -1, model = -1, stepping = -1;

    /* look the model name up in the built-in table */
    def = NULL;
    for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
        if (strcmp(name, x86_defs[i].name) == 0) {
            def = &x86_defs[i];
            break;
        }
    }
    if (kvm_enabled() && strcmp(name, "host") == 0) {
        cpu_x86_fill_host(x86_cpu_def);
    } else if (!def) {
        goto error;
    } else {
        memcpy(x86_cpu_def, def, sizeof(*def));
    }

    /* always advertise that we run under a hypervisor */
    add_flagname_to_bitmaps("hypervisor", &plus_features,
        &plus_ext_features, &plus_ext2_features, &plus_ext3_features);

    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0; val++;
            if (!strcmp(featurestr, "family")) {
                char *err;
                family = strtol(val, &err, 10);
                if (!*val || *err || family < 0) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = family;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                model = strtol(val, &err, 10);
                if (!*val || *err || model < 0 || model > 0xff) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = model;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                stepping = strtol(val, &err, 10);
                if (!*val || *err || stepping < 0 || stepping > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = stepping;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                /* pack 12 chars into the three 4-byte vendor registers */
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
                for(i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i    ]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
                x86_cpu_def->vendor_override = 1;
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    /* apply the accumulated overrides: "+" bits win over the model's
     * defaults, then "-" bits are cleared */
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    free(s);
    return 0;

error:
    free(s);
    return -1;
}
448

    
449
void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
450
{
451
    unsigned int i;
452

    
453
    for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
454
        (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
455
}
456

    
457
/* Resolve @cpu_model and load the resulting CPUID state into @env.
 * Returns 0 on success, -1 if the model string could not be parsed.
 * Models without an explicit vendor default to the Intel vendor id. */
static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_vendor_override = def->vendor_override;
    env->cpuid_level = def->level;
    /* encode family/model/stepping in CPUID.1:EAX format; families above
     * 0x0f spill into the extended-family field */
    if (def->family > 0x0f)
        env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
    else
        env->cpuid_version = def->family << 8;
    env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
    env->cpuid_version |= def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;  /* PAT MSR power-on default */
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
    {
        /* pack the brand string into the 12 32-bit cpuid_model words,
         * NUL-padded to 48 bytes */
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
    return 0;
}
502

    
503
/* NOTE: must be called outside the CPU execute loop */
504
/* Reset @env to the x86 power-on state (real mode, CS:IP = F000:FFF0,
 * flags = 2, FPU/SSE defaults).  CPUID fields and everything past the
 * `breakpoints` member survive the reset.
 * NOTE: must be called outside the CPU execute loop. */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    /* clear only up to `breakpoints`; later members are preserved */
    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;  /* global interrupt flag set (SVM) */

    cpu_x86_update_cr0(env, 0x60000010);  /* CD | NW | ET */
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* real-mode segments; CS base points at the reset vector segment */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;  /* EDX holds CPU signature */

    env->eflags = 0x2;  /* only the always-set reserved bit */

    /* FPU init: all stack slots tagged empty, default control word */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;  /* SSE: all exceptions masked */

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
}
574

    
575
/* Free a CPU state previously allocated by cpu_x86_init(). */
void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}
579

    
580
/***********************************************************/
581
/* x86 debug */
582

    
583
/* Human-readable names for the lazy condition-code operations, indexed by
 * CC_OP_* (must stay in sync with the CC_OP enum in cpu.h); used only for
 * register dumps. */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
637

    
638
static void
639
cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
640
                       int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
641
                       const char *name, struct SegmentCache *sc)
642
{
643
#ifdef TARGET_X86_64
644
    if (env->hflags & HF_CS64_MASK) {
645
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
646
                    sc->selector, sc->base, sc->limit, sc->flags);
647
    } else
648
#endif
649
    {
650
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
651
                    (uint32_t)sc->base, sc->limit, sc->flags);
652
    }
653

    
654
    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
655
        goto done;
656

    
657
    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
658
    if (sc->flags & DESC_S_MASK) {
659
        if (sc->flags & DESC_CS_MASK) {
660
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
661
                           ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
662
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
663
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
664
        } else {
665
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
666
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
667
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
668
        }
669
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
670
    } else {
671
        static const char *sys_type_name[2][16] = {
672
            { /* 32 bit mode */
673
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
674
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
675
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
676
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
677
            },
678
            { /* 64 bit mode */
679
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
680
                "Reserved", "Reserved", "Reserved", "Reserved",
681
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
682
                "Reserved", "IntGate64", "TrapGate64"
683
            }
684
        };
685
        cpu_fprintf(f, sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
686
                                    [(sc->flags & DESC_TYPE_MASK)
687
                                     >> DESC_TYPE_SHIFT]);
688
    }
689
done:
690
    cpu_fprintf(f, "\n");
691
}
692

    
693
void cpu_dump_state(CPUState *env, FILE *f,
694
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
695
                    int flags)
696
{
697
    int eflags, i, nb;
698
    char cc_op_name[32];
699
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
700

    
701
    if (kvm_enabled())
702
        kvm_arch_get_registers(env);
703

    
704
    eflags = env->eflags;
705
#ifdef TARGET_X86_64
706
    if (env->hflags & HF_CS64_MASK) {
707
        cpu_fprintf(f,
708
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
709
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
710
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
711
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
712
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
713
                    env->regs[R_EAX],
714
                    env->regs[R_EBX],
715
                    env->regs[R_ECX],
716
                    env->regs[R_EDX],
717
                    env->regs[R_ESI],
718
                    env->regs[R_EDI],
719
                    env->regs[R_EBP],
720
                    env->regs[R_ESP],
721
                    env->regs[8],
722
                    env->regs[9],
723
                    env->regs[10],
724
                    env->regs[11],
725
                    env->regs[12],
726
                    env->regs[13],
727
                    env->regs[14],
728
                    env->regs[15],
729
                    env->eip, eflags,
730
                    eflags & DF_MASK ? 'D' : '-',
731
                    eflags & CC_O ? 'O' : '-',
732
                    eflags & CC_S ? 'S' : '-',
733
                    eflags & CC_Z ? 'Z' : '-',
734
                    eflags & CC_A ? 'A' : '-',
735
                    eflags & CC_P ? 'P' : '-',
736
                    eflags & CC_C ? 'C' : '-',
737
                    env->hflags & HF_CPL_MASK,
738
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
739
                    (int)(env->a20_mask >> 20) & 1,
740
                    (env->hflags >> HF_SMM_SHIFT) & 1,
741
                    env->halted);
742
    } else
743
#endif
744
    {
745
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
746
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
747
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
748
                    (uint32_t)env->regs[R_EAX],
749
                    (uint32_t)env->regs[R_EBX],
750
                    (uint32_t)env->regs[R_ECX],
751
                    (uint32_t)env->regs[R_EDX],
752
                    (uint32_t)env->regs[R_ESI],
753
                    (uint32_t)env->regs[R_EDI],
754
                    (uint32_t)env->regs[R_EBP],
755
                    (uint32_t)env->regs[R_ESP],
756
                    (uint32_t)env->eip, eflags,
757
                    eflags & DF_MASK ? 'D' : '-',
758
                    eflags & CC_O ? 'O' : '-',
759
                    eflags & CC_S ? 'S' : '-',
760
                    eflags & CC_Z ? 'Z' : '-',
761
                    eflags & CC_A ? 'A' : '-',
762
                    eflags & CC_P ? 'P' : '-',
763
                    eflags & CC_C ? 'C' : '-',
764
                    env->hflags & HF_CPL_MASK,
765
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
766
                    (int)(env->a20_mask >> 20) & 1,
767
                    (env->hflags >> HF_SMM_SHIFT) & 1,
768
                    env->halted);
769
    }
770

    
771
    for(i = 0; i < 6; i++) {
772
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
773
                               &env->segs[i]);
774
    }
775
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
776
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);
777

    
778
#ifdef TARGET_X86_64
779
    if (env->hflags & HF_LMA_MASK) {
780
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
781
                    env->gdt.base, env->gdt.limit);
782
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
783
                    env->idt.base, env->idt.limit);
784
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
785
                    (uint32_t)env->cr[0],
786
                    env->cr[2],
787
                    env->cr[3],
788
                    (uint32_t)env->cr[4]);
789
        for(i = 0; i < 4; i++)
790
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
791
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
792
                    env->dr[6], env->dr[7]);
793
    } else
794
#endif
795
    {
796
        cpu_fprintf(f, "GDT=     %08x %08x\n",
797
                    (uint32_t)env->gdt.base, env->gdt.limit);
798
        cpu_fprintf(f, "IDT=     %08x %08x\n",
799
                    (uint32_t)env->idt.base, env->idt.limit);
800
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
801
                    (uint32_t)env->cr[0],
802
                    (uint32_t)env->cr[2],
803
                    (uint32_t)env->cr[3],
804
                    (uint32_t)env->cr[4]);
805
        for(i = 0; i < 4; i++)
806
            cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]);
807
        cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->dr[7]);
808
    }
809
    if (flags & X86_DUMP_CCOP) {
810
        if ((unsigned)env->cc_op < CC_OP_NB)
811
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
812
        else
813
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
814
#ifdef TARGET_X86_64
815
        if (env->hflags & HF_CS64_MASK) {
816
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
817
                        env->cc_src, env->cc_dst,
818
                        cc_op_name);
819
        } else
820
#endif
821
        {
822
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
823
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
824
                        cc_op_name);
825
        }
826
    }
827
    if (flags & X86_DUMP_FPU) {
828
        int fptag;
829
        fptag = 0;
830
        for(i = 0; i < 8; i++) {
831
            fptag |= ((!env->fptags[i]) << i);
832
        }
833
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
834
                    env->fpuc,
835
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
836
                    env->fpstt,
837
                    fptag,
838
                    env->mxcsr);
839
        for(i=0;i<8;i++) {
840
#if defined(USE_X86LDOUBLE)
841
            union {
842
                long double d;
843
                struct {
844
                    uint64_t lower;
845
                    uint16_t upper;
846
                } l;
847
            } tmp;
848
            tmp.d = env->fpregs[i].d;
849
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
850
                        i, tmp.l.lower, tmp.l.upper);
851
#else
852
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
853
                        i, env->fpregs[i].mmx.q);
854
#endif
855
            if ((i & 1) == 1)
856
                cpu_fprintf(f, "\n");
857
            else
858
                cpu_fprintf(f, " ");
859
        }
860
        if (env->hflags & HF_CS64_MASK)
861
            nb = 16;
862
        else
863
            nb = 8;
864
        for(i=0;i<nb;i++) {
865
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
866
                        i,
867
                        env->xmm_regs[i].XMM_L(3),
868
                        env->xmm_regs[i].XMM_L(2),
869
                        env->xmm_regs[i].XMM_L(1),
870
                        env->xmm_regs[i].XMM_L(0));
871
            if ((i & 1) == 1)
872
                cpu_fprintf(f, "\n");
873
            else
874
                cpu_fprintf(f, " ");
875
        }
876
    }
877
}
878

    
879
/***********************************************************/
880
/* x86 mmu */
881
/* XXX: add PGE support */
882

    
883
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    /* Normalize the requested state to 0 or 1. */
    a20_state = (a20_state != 0);

    /* Nothing to do if the A20 gate already has the requested state
       (bit 20 of a20_mask mirrors the current gate setting). */
    if (a20_state == ((env->a20_mask >> 20) & 1)) {
        return;
    }

#if defined(DEBUG_MMU)
    printf("A20 update: a20=%d\n", a20_state);
#endif
    /* If the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs. */
    cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

    /* When A20 changes, every MMU mapping becomes invalid, so the
       whole TLB must be flushed. */
    tlb_flush(env, 1);
    env->a20_mask = (~0x100000) | (a20_state << 20);
}
900

    
901
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    /* Bits whose change invalidates cached translations. */
    const uint32_t flush_mask = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* Toggling PG/WP/PE changes the meaning of existing mappings:
       drop the whole TLB. */
    if ((new_cr0 & flush_mask) != (env->cr[0] & flush_mask)) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* Paging enabled with LME set: enter long mode. */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK)) {
            return;
        }
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* Paging disabled while LMA set: leave long mode. */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    /* ET is hard-wired to 1 on modern CPUs. */
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* Mirror the PE flag into the hidden flags. */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* Real mode always runs with ADDSEG set. */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* Copy MP/EM/TS from CR0 into the hidden FPU flags (CR0 bits 1..3
       shifted into the hflags positions). */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
941

    
942
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
943
   the PDPT */
944
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;

    /* A new page-directory base only matters while paging is on;
       non-global entries must then be dropped from the TLB. */
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}
954

    
955
/* Install a new CR4 value, flushing the TLB and updating the hidden
 * SSE flag as needed.
 *
 * Fix: the DEBUG_MMU trace previously printed the *old* env->cr[4]
 * while labelling it "CR4 update"; it now prints the incoming value,
 * consistent with cpu_x86_update_cr0()'s trace of new_cr0.
 */
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", new_cr4);
#endif
    /* PGE/PAE/PSE changes alter page-table interpretation: flush all. */
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling: OSFXSR can only be set if the CPU reports SSE. */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}
974

    
975
#if defined(CONFIG_USER_ONLY)
976

    
977
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* User-mode-only emulation: every fault is delivered to the guest
       as a user-level page fault (#PF), no page tables are walked. */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT) | PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
988

    
989
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    /* User-mode emulation: virtual and "physical" addresses coincide. */
    return addr;
}
993

    
994
#else
995

    
996
/* XXX: This value should match the one returned by CPUID
997
 * and in exec.c */
998
# if defined(TARGET_X86_64)
999
# define PHYS_ADDR_MASK 0xfffffff000LL
1000
# else
1001
# define PHYS_ADDR_MASK 0xffffff000LL
1002
# endif
1003

    
1004
/* return value:
1005
   -1 = cannot handle fault
1006
   0  = nothing more to do
1007
   1  = generate PF fault
1008
   2  = soft MMU activation required for this block
1009
*/
1010
/* Walk the guest page tables for 'addr' and install the translation
 * in the TLB, or record a pending fault.
 *
 * is_write1: 0 = read, 1 = write, 2 = instruction fetch.
 * return value:
 *   -1 = cannot handle fault
 *   0  = nothing more to do
 *   1  = generate PF fault
 *   2  = soft MMU activation required for this block
 */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* Paging disabled: identity mapping, full access. */
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        /* PAE (and, under TARGET_X86_64, long-mode 4-level) walk. */
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* Non-canonical addresses raise #GP, not #PF. */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* NX set while EFER.NXE is clear is a reserved-bit fault. */
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            /* Accumulate permissions; NX is inverted so that AND
               combines "any level denies execute". */
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* Legacy PAE: 4-entry PDPT indexed by addr[31:30].
               XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* Legacy PDPTEs carry no permission bits: start permissive. */
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK; /* undo inversion: walk ends here */
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                /* Supervisor writes honor R/W only when CR0.WP is set. */
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        /* Legacy 32-bit (non-PAE) two-level walk. */
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
1299

    
1300
/* Translate a guest virtual address to a physical address for the
 * debugger, without touching accessed/dirty bits or raising faults.
 * Returns -1 if the address is not currently mapped.
 */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* Non-canonical addresses have no translation. */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            /* Legacy PAE PDPT. */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            /* No paging: identity mapping. */
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
1393

    
1394
/* Register the debug-register breakpoint/watchpoint 'index' (DR0..DR3)
 * with the generic breakpoint machinery, according to its type bits
 * in DR7. On failure the cached breakpoint reference is cleared.
 */
void hw_breakpoint_insert(CPUState *env, int index)
{
    int bp_type = hw_breakpoint_type(env->dr[7], index);
    int err = 0;

    switch (bp_type) {
    case 0:
        /* Execution breakpoint. */
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        }
        break;
    case 1: /* data write */
    case 3: /* data read/write */
    {
        int wp_flags = BP_CPU |
            (bp_type == 1 ? BP_MEM_WRITE : BP_MEM_ACCESS);
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    wp_flags, &env->cpu_watchpoint[index]);
        break;
    }
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
    if (err) {
        env->cpu_breakpoint[index] = NULL;
    }
}
1421

    
1422
/* Undo hw_breakpoint_insert() for debug register 'index'. A cleared
 * cached reference means nothing was registered, so nothing to do.
 */
void hw_breakpoint_remove(CPUState *env, int index)
{
    if (!env->cpu_breakpoint[index]) {
        return;
    }

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        /* Execution breakpoint: only registered while enabled. */
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        }
        break;
    case 1:
    case 3:
        /* Data watchpoint (write or read/write). */
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
}
1440

    
1441
/* Scan DR0..DR3 for breakpoints/watchpoints that match the current
 * state and compute the new DR6 hit bits. DR6 is written back when an
 * enabled breakpoint hit, or unconditionally when force_dr6_update is
 * set. Returns non-zero iff an enabled breakpoint was hit.
 */
int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong new_dr6 = env->dr[6] & ~0xf;
    int hit_enabled = 0;
    int i;

    for (i = 0; i < 4; i++) {
        int bp_type = hw_breakpoint_type(env->dr[7], i);
        int exec_hit = (bp_type == 0 && env->dr[i] == env->eip);
        int data_hit = ((bp_type & 1) && env->cpu_watchpoint[i] &&
                        (env->cpu_watchpoint[i]->flags & BP_WATCHPOINT_HIT));

        if (exec_hit || data_hit) {
            new_dr6 |= 1 << i;
            if (hw_breakpoint_enabled(env->dr[7], i)) {
                hit_enabled = 1;
            }
        }
    }
    if (hit_enabled || force_dr6_update) {
        env->dr[6] = new_dr6;
    }
    return hit_enabled;
}
1462

    
1463
static CPUDebugExcpHandler *prev_debug_excp_handler;
1464

    
1465
void raise_exception(int exception_index);
1466

    
1467
/* Debug-exception hook: decide whether a watchpoint/breakpoint hit
 * belongs to the guest's debug registers (raise #DB) or to the host
 * debugger, then chain to the previously installed handler.
 * NOTE: raise_exception() does not return.
 */
static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        /* A data watchpoint fired. */
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0)) {
                raise_exception(EXCP01_DB);
            } else {
                /* Spurious for the guest: resume execution. */
                cpu_resume_from_signal(env, NULL);
            }
        }
    } else {
        /* An execution breakpoint fired: find the one at EIP. */
        TAILQ_FOREACH(bp, &env->breakpoints, entry) {
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception(EXCP01_DB);
                }
                break;
            }
        }
    }
    if (prev_debug_excp_handler) {
        prev_debug_excp_handler(env);
    }
}
1492

    
1493
/* This should come from sysemu.h - if we could include it here... */
1494
void qemu_system_reset_request(void);
1495

    
1496
/* Inject a machine-check event into bank 'bank' of 'cenv'.
 * Silently ignored when the bank is out of range, the status is not
 * valid, or uncorrected-error reporting is disabled globally
 * (MSR_MCG_CTL) or per bank (MSR_MCi_CTL). An uncorrected error while
 * MCIP is already set (or CR4.MCE clear) triggers a system reset.
 */
void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc)
{
    uint64_t mcg_cap = cenv->mcg_cap;
    unsigned bank_num = mcg_cap & 0xff;
    uint64_t *banks = cenv->mce_banks;

    if (bank >= bank_num || !(status & MCI_STATUS_VAL)) {
        return;
    }

    /*
     * if MSR_MCG_CTL is not all 1s, the uncorrected error
     * reporting is disabled
     */
    if ((status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
        cenv->mcg_ctl != ~(uint64_t)0) {
        return;
    }
    banks += 4 * bank; /* banks[0..3] = CTL, STATUS, ADDR, MISC */
    /*
     * if MSR_MCi_CTL is not all 1s, the uncorrected error
     * reporting is disabled for the bank
     */
    if ((status & MCI_STATUS_UC) && banks[0] != ~(uint64_t)0) {
        return;
    }

    if (status & MCI_STATUS_UC) {
        /* Uncorrected error: deliver a machine-check exception. */
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            fprintf(stderr, "injects mce exception while previous "
                    "one is in progress!\n");
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            status |= MCI_STATUS_OVER;
        }
        banks[2] = addr;
        banks[3] = misc;
        cenv->mcg_status = mcg_status;
        banks[1] = status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        /* Corrected error and the bank is free (or holds only a
           corrected error): just record it. */
        if (banks[1] & MCI_STATUS_VAL) {
            status |= MCI_STATUS_OVER;
        }
        banks[2] = addr;
        banks[3] = misc;
        banks[1] = status;
    } else {
        /* Bank already holds an uncorrected error: note the overflow. */
        banks[1] |= MCI_STATUS_OVER;
    }
}
1546
#endif /* !CONFIG_USER_ONLY */
1547

    
1548
/* Set up machine-check state (MCG capability/control and the per-bank
 * array) for CPUs of family >= 6 that advertise both MCE and MCA.
 */
static void mce_init(CPUX86State *cenv)
{
    unsigned int bank, bank_num;
    unsigned int family = (cenv->cpuid_version >> 8) & 0xf;

    if (family < 6 ||
        (cenv->cpuid_features & (CPUID_MCE | CPUID_MCA))
            != (CPUID_MCE | CPUID_MCA)) {
        return;
    }

    cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
    cenv->mcg_ctl = ~(uint64_t)0;
    bank_num = cenv->mcg_cap & 0xff;
    /* 4 MSRs per bank: CTL, STATUS, ADDR, MISC. */
    cenv->mce_banks = qemu_mallocz(bank_num * sizeof(uint64_t) * 4);
    for (bank = 0; bank < bank_num; bank++) {
        cenv->mce_banks[bank * 4] = ~(uint64_t)0; /* MCi_CTL: all enabled */
    }
}
1562

    
1563
/* Execute the host CPUID instruction with the given leaf/subleaf and
 * store the results through any non-NULL output pointers. Only
 * compiled in when KVM support is enabled; otherwise the outputs are
 * left untouched.
 */
static void host_cpuid(uint32_t function, uint32_t count,
                       uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#else
    /* On 32-bit, EBX may be the PIC register: save everything and
       store the results through memory instead of output operands. */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    if (eax) {
        *eax = vec[0];
    }
    if (ebx) {
        *ebx = vec[1];
    }
    if (ecx) {
        *ecx = vec[2];
    }
    if (edx) {
        *edx = vec[3];
    }
#endif
}
1597

    
1598
/* Emulate the CPUID instruction for the virtual CPU described by 'env':
 * fill *eax/*ebx/*ecx/*edx with the values for leaf 'index' (and
 * sub-leaf 'count' where the leaf is count-dependent, e.g. leaf 4). */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    /* test if maximum index reached: out-of-range requests are redirected
     * to the highest basic leaf, mirroring real-hardware behaviour. */
    if (index & 0x80000000) {
        /* extended range (0x80000000+) is bounded by cpuid_xlevel */
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Vendor-ID string and maximum basic leaf. */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;

        /* sysenter isn't supported on compatibility mode on AMD, and syscall
         * isn't supported in compatibility mode on Intel, so advertise the
         * actual host CPU vendor -- and say goodbye to migration between
         * different vendors if you use compatibility mode. */
        if (kvm_enabled() && !env->cpuid_vendor_override)
            host_cpuid(0, 0, NULL, ebx, ecx, edx);
        break;
    case 1:
        /* Version, APIC ID and feature flags. */
        *eax = env->cpuid_version;
        /* EBX[15:8] = CLFLUSH line size in quadwords (8 * 8 = 64 bytes);
         * Linux wants it. */
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8;
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        *eax = 1;
        *ebx = 0;
        *ecx = 0;
        *edx = 0x2c307d;
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        switch (count) {
            case 0: /* L1 dcache info */
                *eax = 0x0000121;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 1: /* L1 icache info */
                *eax = 0x0000122;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 2: /* L2 cache info */
                *eax = 0x0000143;
                *ebx = 0x3c0003f;
                *ecx = 0x0000fff;
                *edx = 0x0000001;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        /* Maximum extended leaf and (AMD-style) vendor string. */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        /* Extended version and feature flags. */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        if (kvm_enabled()) {
            /* Nested SVM not yet supported in KVM */
            *ecx &= ~CPUID_EXT3_SVM;
        } else {
            /* AMD 3DNow! is not supported in QEMU */
            *edx &= ~(CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT);
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf across three leaves. */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        *eax = 0x01ff01ff;
        *ebx = 0x01ff01ff;
        *ecx = 0x40020140;
        *edx = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        *eax = 0;
        *ebx = 0x42004200;
        *ecx = 0x02008140;
        *edx = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
/* XXX: This value must match the one used in the MMU code. */ 
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
/* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028;        /* 48 bits virtual, 40 bits physical */
        } else {
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000000A:
        /* SVM capability leaf. */
        *eax = 0x00000001; /* SVM Revision */
        *ebx = 0x00000010; /* nr of ASIDs */
        *ecx = 0;
        *edx = 0; /* optional features */
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
1769

    
1770

    
1771
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
1772
                            target_ulong *base, unsigned int *limit,
1773
                            unsigned int *flags)
1774
{
1775
    SegmentCache *dt;
1776
    target_ulong ptr;
1777
    uint32_t e1, e2;
1778
    int index;
1779

    
1780
    if (selector & 0x4)
1781
        dt = &env->ldt;
1782
    else
1783
        dt = &env->gdt;
1784
    index = selector & ~7;
1785
    ptr = dt->base + index;
1786
    if ((index + 7) > dt->limit
1787
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
1788
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
1789
        return 0;
1790

    
1791
    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
1792
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
1793
    if (e2 & DESC_G_MASK)
1794
        *limit = (*limit << 12) | 0xfff;
1795
    *flags = e2;
1796

    
1797
    return 1;
1798
}
1799

    
1800
CPUX86State *cpu_x86_init(const char *cpu_model)
1801
{
1802
    CPUX86State *env;
1803
    static int inited;
1804

    
1805
    env = qemu_mallocz(sizeof(CPUX86State));
1806
    cpu_exec_init(env);
1807
    env->cpu_model_str = cpu_model;
1808

    
1809
    /* init various static tables */
1810
    if (!inited) {
1811
        inited = 1;
1812
        optimize_flags_init();
1813
#ifndef CONFIG_USER_ONLY
1814
        prev_debug_excp_handler =
1815
            cpu_set_debug_excp_handler(breakpoint_handler);
1816
#endif
1817
    }
1818
    if (cpu_x86_register(env, cpu_model) < 0) {
1819
        cpu_x86_close(env);
1820
        return NULL;
1821
    }
1822
    mce_init(env);
1823
    cpu_reset(env);
1824

    
1825
    qemu_init_vcpu(env);
1826

    
1827
    return env;
1828
}
1829

    
1830
#if !defined(CONFIG_USER_ONLY)
1831
/* Handle an INIT IPI: fully reset the CPU and its local APIC, but keep a
 * pending SIPI request alive so the CPU can still be started afterwards. */
void do_cpu_init(CPUState *env)
{
    int pending_sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;

    cpu_reset(env);
    env->interrupt_request = pending_sipi;  /* only SIPI survives the reset */
    apic_init_reset(env);
}
1838

    
1839
/* Handle a Startup IPI by forwarding it to the local APIC emulation. */
void do_cpu_sipi(CPUState *env)
{
    apic_sipi(env);
}
1843
#else
1844
/* User-mode emulation has no APIC: an INIT IPI is a no-op here. */
void do_cpu_init(CPUState *env)
{
}
1847
/* User-mode emulation has no APIC: a Startup IPI is a no-op here. */
void do_cpu_sipi(CPUState *env)
{
}
1850
#endif