Statistics
| Branch: | Revision:

root / target-i386 / helper.c @ 109819e0

History | View | Annotate | Download (60.5 kB)

1
/*
2
 *  i386 helpers (without register variable usage)
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include <stdarg.h>
20
#include <stdlib.h>
21
#include <stdio.h>
22
#include <string.h>
23
#include <inttypes.h>
24
#include <signal.h>
25

    
26
#include "cpu.h"
27
#include "exec-all.h"
28
#include "qemu-common.h"
29
#include "kvm.h"
30

    
31
//#define DEBUG_MMU
32

    
33
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
 * about feature names, the Linux name is used. */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
static const char *ext_feature_name[] = {
    "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
    "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
    NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, "hypervisor",
};
static const char *ext2_feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
    "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
};
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};

/* Look "flagname" up in one 32-entry CPUID feature-name table and, on a
 * match, set the corresponding bit in *features.  Returns 1 if a match
 * was found, 0 otherwise.  A name may legitimately occur in more than
 * one table (e.g. "fpu" is in both the standard and the AMD extended
 * leaf), so callers must consult every table rather than stop early. */
static int lookup_feature(const char *flagname, const char **table,
                          uint32_t *features)
{
    int i;
    int found = 0;

    for (i = 0; i < 32; i++) {
        if (table[i] && !strcmp(flagname, table[i])) {
            /* 1u, not 1: left-shifting 1 by 31 would overflow a signed
             * int, which is undefined behaviour. */
            *features |= 1u << i;
            found = 1;
        }
    }
    return found;
}

/* Set the bit for CPU feature "flagname" in every feature word whose
 * CPUID leaf defines a flag of that name.  Unknown names are reported
 * on stderr but are otherwise ignored. */
static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    int found = 0;

    found |= lookup_feature(flagname, feature_name, features);
    found |= lookup_feature(flagname, ext_feature_name, ext_features);
    found |= lookup_feature(flagname, ext2_feature_name, ext2_features);
    found |= lookup_feature(flagname, ext3_feature_name, ext3_features);
    if (!found) {
        fprintf(stderr, "CPU feature %s not found\n", flagname);
    }
}
93

    
94
/* Static definition of one built-in CPU model; the x86_defs[] table
 * below holds one of these per "-cpu" name. */
typedef struct x86_def_t {
    const char *name;                   /* model name as given to "-cpu" */
    uint32_t level;                     /* maximum basic CPUID leaf (CPUID 0.EAX) */
    uint32_t vendor1, vendor2, vendor3; /* vendor string in EBX/EDX/ECX order */
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;                    /* maximum extended leaf (CPUID 0x80000000.EAX) */
    char model_id[48];                  /* brand string for leaves 0x80000002..4 */
    int vendor_override;                /* non-zero if user forced the vendor string */
} x86_def_t;
106

    
107
/* Cumulative CPUID.01H:EDX feature sets for the classic Intel parts;
 * each generation builds on the previous one. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
/* Baseline P6-class feature set used by the qemu32/qemu64 models */
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
118
/* Table of built-in CPU models, one entry per valid "-cpu <name>".
 * The 64-bit capable models are only compiled in for x86_64 targets. */
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        /* 0x0183F3FF masks PPRO_FEATURES down to the bits that are also
         * defined in the AMD extended leaf 0x80000001.EDX */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "phenom",
        .level = 5,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 16,
        .model = 2,
        .stepping = 3,
        /* Missing: CPUID_VME, CPUID_HT */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* Missing: CPUID_EXT_CX16, CPUID_EXT_POPCNT */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
    },
    {
        .name = "core2duo",
        .level = 10,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* The original CPU also implements these features:
               CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* The original CPU also implements these ext features:
               CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
               CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU     T7700  @ 2.40GHz",
    },
#endif
    {
        .name = "qemu32",
        .level = 2,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3,
        .xlevel = 0,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "coreduo",
        .level = 10,
        .family = 6,
        .model = 14,
        .stepping = 8,
        /* The original CPU also implements these features:
               CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
        /* The original CPU also implements these ext features:
               CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
               CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .ext2_features = CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU           T2600  @ 2.16GHz",
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        /* XXX: put another string ? */
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "n270",
        /* original is on level 10 */
        .level = 5,
        .family = 6,
        .model = 28,
        .stepping = 2,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
            /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
             * CPUID_HT | CPUID_TM | CPUID_PBE */
            /* Some CPUs got no CPUID_SEP */
        .ext_features = CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
            /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
             * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270   @ 1.60GHz",
    },
};
294

    
295
static void host_cpuid(uint32_t function, uint32_t count, uint32_t *eax,
296
                               uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
297

    
298
/* Copy the host CPU's 48-byte brand string (CPUID leaves
 * 0x80000002..0x80000004, 16 bytes each) into str.  Always returns 0.
 * NOTE(review): assumes str has room for 48 bytes — the callers pass
 * x86_def_t.model_id[48]. */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4] = { 0, 0, 0, 0 };
    int leaf;

    for (leaf = 0; leaf < 3; leaf++) {
        char *dst = str + leaf * 16;

        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        /* The brand string comes back in EAX, EBX, ECX, EDX order. */
        memcpy(dst +  0, &regs[0], 4);
        memcpy(dst +  4, &regs[1], 4);
        memcpy(dst +  8, &regs[2], 4);
        memcpy(dst + 12, &regs[3], 4);
    }
    return 0;
}
312

    
313
/* Populate *x86_cpu_def from the CPUID of the CPU QEMU itself is
 * running on (used for "-cpu host" under KVM).  Always returns 0. */
static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
{
    uint32_t a = 0, b = 0, c = 0, d = 0;

    x86_cpu_def->name = "host";

    /* Leaf 0: maximum basic leaf and vendor string (EBX/EDX/ECX order). */
    host_cpuid(0x0, 0, &a, &b, &c, &d);
    x86_cpu_def->level = a;
    x86_cpu_def->vendor1 = b;
    x86_cpu_def->vendor2 = d;
    x86_cpu_def->vendor3 = c;

    /* Leaf 1: family/model/stepping plus the two standard feature words.
     * Extended family/model fields are folded into the plain values. */
    host_cpuid(0x1, 0, &a, &b, &c, &d);
    x86_cpu_def->family = ((a >> 8) & 0x0F) + ((a >> 20) & 0xFF);
    x86_cpu_def->model = ((a >> 4) & 0x0F) | ((a & 0xF0000) >> 12);
    x86_cpu_def->stepping = a & 0x0F;
    x86_cpu_def->ext_features = c;
    x86_cpu_def->features = d;

    /* Leaf 0x80000000: maximum extended leaf. */
    host_cpuid(0x80000000, 0, &a, &b, &c, &d);
    x86_cpu_def->xlevel = a;

    /* Leaf 0x80000001: AMD-style extended feature words. */
    host_cpuid(0x80000001, 0, &a, &b, &c, &d);
    x86_cpu_def->ext2_features = d;
    x86_cpu_def->ext3_features = c;
    cpu_x86_fill_model_id(x86_cpu_def->model_id);
    /* The vendor comes from the host, not from the user. */
    x86_cpu_def->vendor_override = 0;

    return 0;
}
342

    
343
/* Parse a "-cpu" model string of the form
 *     name[,+flag][,-flag][,key=value]...
 * into *x86_cpu_def.  "name" must match an x86_defs[] entry, or be
 * "host" when running under KVM (which copies the host's CPUID).
 * Recognized keys: family, model, stepping, vendor, model_id.
 * Returns 0 on success, -1 on any parse error.
 * NOTE(review): uses strtok(), so this function is not reentrant. */
static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    char *s = strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
    int family = -1, model = -1, stepping = -1;

    /* Look the base model name up in the static table. */
    def = NULL;
    for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
        if (strcmp(name, x86_defs[i].name) == 0) {
            def = &x86_defs[i];
            break;
        }
    }
    if (kvm_enabled() && strcmp(name, "host") == 0) {
        cpu_x86_fill_host(x86_cpu_def);
    } else if (!def) {
        goto error;
    } else {
        memcpy(x86_cpu_def, def, sizeof(*def));
    }

    /* Always advertise that the guest runs under a hypervisor. */
    add_flagname_to_bitmaps("hypervisor", &plus_features,
        &plus_ext_features, &plus_ext2_features, &plus_ext3_features);

    featurestr = strtok(NULL, ",");

    /* Process the remaining comma-separated options one at a time. */
    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
        } else if ((val = strchr(featurestr, '='))) {
            /* Split "key=value" in place. */
            *val = 0; val++;
            if (!strcmp(featurestr, "family")) {
                char *err;
                family = strtol(val, &err, 10);
                if (!*val || *err || family < 0) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = family;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                model = strtol(val, &err, 10);
                if (!*val || *err || model < 0 || model > 0xff) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = model;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                stepping = strtol(val, &err, 10);
                if (!*val || *err || stepping < 0 || stepping > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = stepping;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                /* Pack the 12-character vendor string into the three
                 * CPUID registers, four little-endian bytes each. */
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
                for(i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i    ]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
                x86_cpu_def->vendor_override = 1;
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    /* Apply accumulated +flags first, then strip -flags; a flag given
     * both ways therefore ends up cleared. */
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    free(s);
    return 0;

error:
    free(s);
    return -1;
}
448

    
449
void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
450
{
451
    unsigned int i;
452

    
453
    for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
454
        (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
455
}
456

    
457
/* Resolve cpu_model and load the resulting CPUID data into *env.
 * Returns 0 on success, -1 if the model string could not be parsed. */
static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        /* Model carries no vendor: default to GenuineIntel. */
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_vendor_override = def->vendor_override;
    env->cpuid_level = def->level;
    /* Encode family/model/stepping into CPUID.01H:EAX layout: families
     * above 0x0f use the extended-family field (bits 20..27) with the
     * base family pinned at 0xf. */
    if (def->family > 0x0f)
        env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
    else
        env->cpuid_version = def->family << 8;
    /* Low model nibble in bits 4..7, extended model in bits 16..19. */
    env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
    env->cpuid_version |= def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL; /* architectural power-up PAT value */
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
    {
        /* Pack the brand string into the 12 32-bit words returned by
         * CPUID leaves 0x80000002..4, NUL-padded out to 48 bytes. */
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
    return 0;
}
502

    
503
/* Bring the CPU to its architectural power-on/reset state.
 * NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    /* Zero everything up to (but not including) the breakpoint
     * bookkeeping, which must survive a reset. */
    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    /* GIF set: interrupts deliverable (relevant for SVM guests). */
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010); /* CD | NW | ET; PE clear = real mode */
    env->a20_mask = ~0x0;                /* A20 gate open */
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);  /* type 2: LDT */
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);  /* type 11: busy TSS */

    /* CS base 0xffff0000 with selector 0xf000 so that CS:IP = F000:FFF0
     * fetches the reset vector just below 4 GB. */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    /* After reset, EDX holds the CPU signature. */
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;  /* only the always-one reserved bit set */

    /* FPU init */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;  /* 1 = register empty */
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;  /* SSE default: all exceptions masked */

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
}
574

    
575
/* Destroy a CPU: release the CPUX86State storage itself. */
void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}
579

    
580
/***********************************************************/
581
/* x86 debug */
582

    
583
/* Printable names for the lazy condition-code states; indexed directly
 * by env->cc_op in cpu_dump_state(), so the order must match the
 * CC_OP_* enumeration (groups of B/W/L/Q operand sizes). */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
637

    
638
static void
639
cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
640
                       int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
641
                       const char *name, struct SegmentCache *sc)
642
{
643
#ifdef TARGET_X86_64
644
    if (env->hflags & HF_CS64_MASK) {
645
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
646
                    sc->selector, sc->base, sc->limit, sc->flags);
647
    } else
648
#endif
649
    {
650
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
651
                    (uint32_t)sc->base, sc->limit, sc->flags);
652
    }
653

    
654
    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
655
        goto done;
656

    
657
    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
658
    if (sc->flags & DESC_S_MASK) {
659
        if (sc->flags & DESC_CS_MASK) {
660
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
661
                           ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
662
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
663
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
664
        } else {
665
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
666
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
667
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
668
        }
669
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
670
    } else {
671
        static const char *sys_type_name[2][16] = {
672
            { /* 32 bit mode */
673
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
674
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
675
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
676
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
677
            },
678
            { /* 64 bit mode */
679
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
680
                "Reserved", "Reserved", "Reserved", "Reserved",
681
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
682
                "Reserved", "IntGate64", "TrapGate64"
683
            }
684
        };
685
        cpu_fprintf(f, sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
686
                                    [(sc->flags & DESC_TYPE_MASK)
687
                                     >> DESC_TYPE_SHIFT]);
688
    }
689
done:
690
    cpu_fprintf(f, "\n");
691
}
692

    
693
void cpu_dump_state(CPUState *env, FILE *f,
694
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
695
                    int flags)
696
{
697
    int eflags, i, nb;
698
    char cc_op_name[32];
699
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
700

    
701
    if (kvm_enabled())
702
        kvm_arch_get_registers(env);
703

    
704
    eflags = env->eflags;
705
#ifdef TARGET_X86_64
706
    if (env->hflags & HF_CS64_MASK) {
707
        cpu_fprintf(f,
708
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
709
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
710
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
711
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
712
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
713
                    env->regs[R_EAX],
714
                    env->regs[R_EBX],
715
                    env->regs[R_ECX],
716
                    env->regs[R_EDX],
717
                    env->regs[R_ESI],
718
                    env->regs[R_EDI],
719
                    env->regs[R_EBP],
720
                    env->regs[R_ESP],
721
                    env->regs[8],
722
                    env->regs[9],
723
                    env->regs[10],
724
                    env->regs[11],
725
                    env->regs[12],
726
                    env->regs[13],
727
                    env->regs[14],
728
                    env->regs[15],
729
                    env->eip, eflags,
730
                    eflags & DF_MASK ? 'D' : '-',
731
                    eflags & CC_O ? 'O' : '-',
732
                    eflags & CC_S ? 'S' : '-',
733
                    eflags & CC_Z ? 'Z' : '-',
734
                    eflags & CC_A ? 'A' : '-',
735
                    eflags & CC_P ? 'P' : '-',
736
                    eflags & CC_C ? 'C' : '-',
737
                    env->hflags & HF_CPL_MASK,
738
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
739
                    (int)(env->a20_mask >> 20) & 1,
740
                    (env->hflags >> HF_SMM_SHIFT) & 1,
741
                    env->halted);
742
    } else
743
#endif
744
    {
745
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
746
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
747
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
748
                    (uint32_t)env->regs[R_EAX],
749
                    (uint32_t)env->regs[R_EBX],
750
                    (uint32_t)env->regs[R_ECX],
751
                    (uint32_t)env->regs[R_EDX],
752
                    (uint32_t)env->regs[R_ESI],
753
                    (uint32_t)env->regs[R_EDI],
754
                    (uint32_t)env->regs[R_EBP],
755
                    (uint32_t)env->regs[R_ESP],
756
                    (uint32_t)env->eip, eflags,
757
                    eflags & DF_MASK ? 'D' : '-',
758
                    eflags & CC_O ? 'O' : '-',
759
                    eflags & CC_S ? 'S' : '-',
760
                    eflags & CC_Z ? 'Z' : '-',
761
                    eflags & CC_A ? 'A' : '-',
762
                    eflags & CC_P ? 'P' : '-',
763
                    eflags & CC_C ? 'C' : '-',
764
                    env->hflags & HF_CPL_MASK,
765
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
766
                    (int)(env->a20_mask >> 20) & 1,
767
                    (env->hflags >> HF_SMM_SHIFT) & 1,
768
                    env->halted);
769
    }
770

    
771
    for(i = 0; i < 6; i++) {
772
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
773
                               &env->segs[i]);
774
    }
775
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
776
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);
777

    
778
#ifdef TARGET_X86_64
779
    if (env->hflags & HF_LMA_MASK) {
780
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
781
                    env->gdt.base, env->gdt.limit);
782
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
783
                    env->idt.base, env->idt.limit);
784
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
785
                    (uint32_t)env->cr[0],
786
                    env->cr[2],
787
                    env->cr[3],
788
                    (uint32_t)env->cr[4]);
789
        for(i = 0; i < 4; i++)
790
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
791
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
792
                    env->dr[6], env->dr[7]);
793
    } else
794
#endif
795
    {
796
        cpu_fprintf(f, "GDT=     %08x %08x\n",
797
                    (uint32_t)env->gdt.base, env->gdt.limit);
798
        cpu_fprintf(f, "IDT=     %08x %08x\n",
799
                    (uint32_t)env->idt.base, env->idt.limit);
800
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
801
                    (uint32_t)env->cr[0],
802
                    (uint32_t)env->cr[2],
803
                    (uint32_t)env->cr[3],
804
                    (uint32_t)env->cr[4]);
805
        for(i = 0; i < 4; i++)
806
            cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]);
807
        cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->dr[7]);
808
    }
809
    if (flags & X86_DUMP_CCOP) {
810
        if ((unsigned)env->cc_op < CC_OP_NB)
811
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
812
        else
813
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
814
#ifdef TARGET_X86_64
815
        if (env->hflags & HF_CS64_MASK) {
816
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
817
                        env->cc_src, env->cc_dst,
818
                        cc_op_name);
819
        } else
820
#endif
821
        {
822
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
823
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
824
                        cc_op_name);
825
        }
826
    }
827
    if (flags & X86_DUMP_FPU) {
828
        int fptag;
829
        fptag = 0;
830
        for(i = 0; i < 8; i++) {
831
            fptag |= ((!env->fptags[i]) << i);
832
        }
833
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
834
                    env->fpuc,
835
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
836
                    env->fpstt,
837
                    fptag,
838
                    env->mxcsr);
839
        for(i=0;i<8;i++) {
840
#if defined(USE_X86LDOUBLE)
841
            union {
842
                long double d;
843
                struct {
844
                    uint64_t lower;
845
                    uint16_t upper;
846
                } l;
847
            } tmp;
848
            tmp.d = env->fpregs[i].d;
849
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
850
                        i, tmp.l.lower, tmp.l.upper);
851
#else
852
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
853
                        i, env->fpregs[i].mmx.q);
854
#endif
855
            if ((i & 1) == 1)
856
                cpu_fprintf(f, "\n");
857
            else
858
                cpu_fprintf(f, " ");
859
        }
860
        if (env->hflags & HF_CS64_MASK)
861
            nb = 16;
862
        else
863
            nb = 8;
864
        for(i=0;i<nb;i++) {
865
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
866
                        i,
867
                        env->xmm_regs[i].XMM_L(3),
868
                        env->xmm_regs[i].XMM_L(2),
869
                        env->xmm_regs[i].XMM_L(1),
870
                        env->xmm_regs[i].XMM_L(0));
871
            if ((i & 1) == 1)
872
                cpu_fprintf(f, "\n");
873
            else
874
                cpu_fprintf(f, " ");
875
        }
876
    }
877
}
878

    
879
/***********************************************************/
880
/* x86 mmu */
881
/* XXX: add PGE support */
882

    
883
/* Set or clear the A20 address line.  a20_state is treated as a boolean;
   the function is a no-op when the line does not actually change. */
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    int cur_state = (env->a20_mask >> 20) & 1;

    a20_state = (a20_state != 0);
    if (a20_state == cur_state) {
        return;
    }
#if defined(DEBUG_MMU)
    printf("A20 update: a20=%d\n", a20_state);
#endif
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

    /* when a20 is changed, all the MMU mappings become invalid, so
       the whole TLB must be flushed */
    tlb_flush(env, 1);
    env->a20_mask = (~0x100000) | (a20_state << 20);
}
900

    
901
/* Install a new CR0 value.  Flushes the TLB when any paging-related bit
   (PG, WP, PE) changes, handles long-mode entry/exit on x86-64, and
   recomputes the hflags bits that shadow CR0 state. */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    const uint32_t paging_bits = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
    int pe_on;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* any change to PG/WP/PE invalidates all cached translations */
    if ((new_cr0 ^ env->cr[0]) & paging_bits) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* entering long mode: requires PAE */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK)) {
            return;
        }
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* leaving long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* mirror the PE bit into the hidden flags */
    pe_on = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_on << HF_PE_SHIFT);
    /* real mode always runs with ADDSEG set */
    env->hflags |= ((pe_on ^ 1) << HF_ADDSEG_SHIFT);
    /* refresh the FPU-related MP/EM/TS shadow bits from CR0 */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
941

    
942
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
943
   the PDPT */
944
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
945
{
946
    env->cr[3] = new_cr3;
947
    if (env->cr[0] & CR0_PG_MASK) {
948
#if defined(DEBUG_MMU)
949
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
950
#endif
951
        tlb_flush(env, 0);
952
    }
953
}
954

    
955
/* Install a new CR4 value.  Flushes the TLB when a paging-related bit
   (PGE, PAE, PSE) changes and keeps the HF_OSFXSR hflag in sync with
   CR4.OSFXSR.  OSFXSR is forced off when the CPU model does not
   advertise SSE. */
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    /* log the value being installed, consistent with the CR0/CR3 debug
       messages (previously this printed the stale env->cr[4]) */
    printf("CR4 update: CR4=%08x\n", new_cr4);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling: OSFXSR cannot be set without SSE support */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}
974

    
975
#if defined(CONFIG_USER_ONLY)
976

    
977
/* User-mode-only emulation: there are no page tables to walk, so every
   MMU fault is reported to the guest as a user-mode page fault (#PF)
   on the faulting address.  Always returns 1 (raise the fault). */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT) | PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
988

    
989
/* User-mode-only emulation: guest virtual addresses map 1:1 to
   "physical" addresses, so the translation is the identity. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
993

    
994
#else
995

    
996
/* XXX: This value should match the one returned by CPUID
997
 * and in exec.c */
998
#if defined(CONFIG_KQEMU)
999
#define PHYS_ADDR_MASK 0xfffff000LL
1000
#else
1001
# if defined(TARGET_X86_64)
1002
# define PHYS_ADDR_MASK 0xfffffff000LL
1003
# else
1004
# define PHYS_ADDR_MASK 0xffffff000LL
1005
# endif
1006
#endif
1007

    
1008
/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
/* Walk the guest page tables for 'addr' and, on success, install the
   translation into QEMU's TLB via tlb_set_page_exec().
   is_write1: 0 = read, 1 = write, 2 = instruction fetch (is_write1 == 2
   is the value checked against the NX bit and reported via
   PG_ERROR_I_D_MASK).  On a guest fault, sets env->error_code /
   env->exception_index (and CR2, unless the #PF is intercepted) and
   returns 1.  Three paging modes are handled: no paging, PAE/long mode
   (64-bit entries), and legacy 32-bit paging. */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    /* paging disabled: identity mapping, full access */
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                /* non-canonical address: #GP, not #PF */
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            /* 4-level walk: PML4E -> PDPE (PDE/PTE handled below).
               ptep accumulates the combined USER/RW/NX permissions;
               NX is XOR-inverted so that AND-combining works for it too. */
            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* NX set while EFER.NXE is off is a reserved-bit violation */
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* legacy PAE: 4-entry PDPT indexed by addr bits 31:30 */
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* legacy-PAE PDPTEs carry no permission bits: start fully open */
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            /* instruction fetch from a no-execute page */
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                /* supervisor writes only honor RW when CR0.WP is set */
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        /* legacy 32-bit (non-PAE) two-level walk */
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    /* build the #PF error code from the access type and CPL */
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
1303

    
1304
/* Debug (e.g. gdbstub) translation of a guest virtual address to a
   physical address.  Mirrors the walk in cpu_x86_handle_mmu_fault but
   is read-only: no access checks, no accessed/dirty updates, no TLB
   insertion.  Returns -1 if the address is not mapped. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            /* legacy PAE: 4-entry PDPT indexed by addr bits 31:30 */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            /* paging disabled: identity mapping */
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page directory entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
1397

    
1398
/* Install the QEMU breakpoint/watchpoint backing debug register 'index',
   according to the type/length/enable fields encoded in DR7.  On failure
   the cached breakpoint reference is cleared. */
void hw_breakpoint_insert(CPUState *env, int index)
{
    int wp_type = 0;
    int err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        /* instruction breakpoint */
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        }
        break;
    case 1:
        /* data-write watchpoint */
        wp_type = BP_CPU | BP_MEM_WRITE;
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    case 3:
        /* data read/write watchpoint */
        wp_type = BP_CPU | BP_MEM_ACCESS;
        break;
    }
    if (wp_type) {
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    wp_type, &env->cpu_watchpoint[index]);
    }
    if (err) {
        env->cpu_breakpoint[index] = NULL;
    }
}
1425

    
1426
/* Remove the QEMU breakpoint/watchpoint previously installed for debug
   register 'index' by hw_breakpoint_insert(); no-op if none is cached. */
void hw_breakpoint_remove(CPUState *env, int index)
{
    if (env->cpu_breakpoint[index] == NULL) {
        return;
    }
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        /* instruction breakpoint */
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        }
        break;
    case 1:
    case 3:
        /* data write / data access watchpoint */
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
}
1444

    
1445
/* Evaluate the four hardware debug registers against the current CPU
   state and compute the DR6 status bits.  DR6 is written back when an
   enabled breakpoint/watchpoint triggered, or unconditionally when
   force_dr6_update is set.  Returns non-zero iff an enabled one hit. */
int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong dr6 = env->dr[6] & ~0xf;
    int hit = 0;
    int i;

    for (i = 0; i < 4; i++) {
        int bp_type = hw_breakpoint_type(env->dr[7], i);
        int triggered;

        if (bp_type == 0) {
            /* instruction breakpoint: matches the current EIP */
            triggered = (env->dr[i] == env->eip);
        } else {
            /* data watchpoint (types 1 and 3): marked hit by the core */
            triggered = (bp_type & 1) && env->cpu_watchpoint[i] &&
                (env->cpu_watchpoint[i]->flags & BP_WATCHPOINT_HIT);
        }
        if (triggered) {
            dr6 |= 1 << i;
            if (hw_breakpoint_enabled(env->dr[7], i)) {
                hit = 1;
            }
        }
    }
    if (hit || force_dr6_update) {
        env->dr[6] = dr6;
    }
    return hit;
}
1466

    
1467
static CPUDebugExcpHandler *prev_debug_excp_handler;
1468

    
1469
void raise_exception(int exception_index);
1470

    
1471
/* Debug-exception hook invoked by the core breakpoint machinery.
   Decides whether a stop was caused by a guest hardware breakpoint or
   watchpoint (BP_CPU) and, if so, injects #DB into the guest; otherwise
   execution is resumed.  Chains to the previously installed handler. */
static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            /* guest (DR7) watchpoint: clear the hit marker, then either
               raise #DB or silently resume if DR6 shows no enabled hit */
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception(EXCP01_DB);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        /* look for a guest hardware breakpoint at the current EIP */
        TAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception(EXCP01_DB);
                }
                break;
            }
    }
    /* chain to the handler that was installed before ours */
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}
1496

    
1497
/* This should come from sysemu.h - if we could include it here... */
1498
void qemu_system_reset_request(void);
1499

    
1500
/* Inject a machine-check event into bank 'bank' of the given CPU.
   status/mcg_status/addr/misc are the values for the corresponding
   MCi_STATUS / MCG_STATUS / MCi_ADDR / MCi_MISC MSRs.  Uncorrected
   (UC) errors raise CPU_INTERRUPT_MCE; corrected errors are only
   logged into the bank.  A UC error while a previous one is still in
   progress (MCIP set) or while CR4.MCE is clear triggers a system
   reset (triple-fault behaviour). */
void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc)
{
    uint64_t mcg_cap = cenv->mcg_cap;
    unsigned bank_num = mcg_cap & 0xff;    /* low byte = bank count */
    uint64_t *banks = cenv->mce_banks;     /* 4 MSRs per bank: CTL,STATUS,ADDR,MISC */

    if (bank >= bank_num || !(status & MCI_STATUS_VAL))
        return;

    /*
     * if MSR_MCG_CTL is not all 1s, the uncorrected error
     * reporting is disabled
     */
    if ((status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
        cenv->mcg_ctl != ~(uint64_t)0)
        return;
    banks += 4 * bank;
    /*
     * if MSR_MCi_CTL is not all 1s, the uncorrected error
     * reporting is disabled for the bank
     */
    if ((status & MCI_STATUS_UC) && banks[0] != ~(uint64_t)0)
        return;
    if (status & MCI_STATUS_UC) {
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            /* nested/unmaskable machine check: reset the machine */
            fprintf(stderr, "injects mce exception while previous "
                    "one is in progress!\n");
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        /* overwriting a still-valid record sets the overflow flag */
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        cenv->mcg_status = mcg_status;
        banks[1] = status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        /* corrected error: record it unless a UC record would be lost */
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        banks[1] = status;
    } else
        /* bank holds a valid UC record: only note the overflow */
        banks[1] |= MCI_STATUS_OVER;
}
1550
#endif /* !CONFIG_USER_ONLY */
1551

    
1552
/* Initialize machine-check state for a CPU model that supports it:
   family >= 6 with both MCE and MCA feature bits.  Allocates the bank
   MSR array (4 MSRs per bank) and sets every MCi_CTL to all 1s. */
static void mce_init(CPUX86State *cenv)
{
    unsigned int family = (cenv->cpuid_version >> 8) & 0xf;
    unsigned int i, nbanks;

    if (family < 6 ||
        (cenv->cpuid_features & (CPUID_MCE | CPUID_MCA)) !=
            (CPUID_MCE | CPUID_MCA)) {
        return;
    }
    cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
    cenv->mcg_ctl = ~(uint64_t)0;
    nbanks = cenv->mcg_cap & 0xff;
    cenv->mce_banks = qemu_mallocz(nbanks * sizeof(uint64_t) * 4);
    for (i = 0; i < nbanks; i++) {
        cenv->mce_banks[i * 4] = ~(uint64_t)0;   /* MCi_CTL: enable all */
    }
}
1566

    
1567
/* Execute the host CPUID instruction for leaf 'function' / subleaf
   'count' and store the results through any non-NULL output pointers.
   Only compiled to do anything under CONFIG_KVM; otherwise the outputs
   are left untouched. */
static void host_cpuid(uint32_t function, uint32_t count,
                       uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#else
    /* 32-bit: EBX may be reserved (PIC register), so save/restore all
       registers with pusha/popa and move the results out by hand */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
#endif
}
1601

    
1602
/* Emulate the CPUID instruction for the guest.
 *
 * 'index' is the requested leaf (EAX on entry), 'count' the sub-leaf
 * (ECX on entry, used by leaf 4).  The four output pointers receive
 * the emulated register values and must all be non-NULL. */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    /* test if maximum index reached */
    if (index & 0x80000000) {
        /* Out-of-range extended leaf: falls back to the highest *basic*
         * leaf (cpuid_level, not cpuid_xlevel) — presumably mirroring
         * real-hardware behavior for out-of-range inputs. */
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Leaf 0: max basic leaf + 12-byte vendor string in EBX/EDX/ECX. */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;

        /* sysenter isn't supported on compatibility mode on AMD.  and syscall
         * isn't supported in compatibility mode on Intel.  so advertise the
         * actual cpu, and say goodbye to migration between different vendors
         * if you use compatibility mode. */
        if (kvm_enabled() && !env->cpuid_vendor_override)
            host_cpuid(0, 0, NULL, ebx, ecx, edx);
        break;
    case 1:
        /* Leaf 1: version/feature information. */
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        *eax = 1;
        *ebx = 0;
        *ecx = 0;
        *edx = 0x2c307d;
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        /* Sub-leaf selected by 'count'; values are fixed descriptors. */
        switch (count) {
            case 0: /* L1 dcache info */
                *eax = 0x0000121;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 1: /* L1 icache info */
                *eax = 0x0000122;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 2: /* L2 cache info */
                *eax = 0x0000143;
                *ebx = 0x3c0003f;
                *ecx = 0x0000fff;
                *edx = 0x0000001;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        /* Extended leaf 0: max extended leaf + vendor string. */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        /* Extended feature bits. */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        if (kvm_enabled()) {
            /* Nested SVM not yet supported in KVM */
            *ecx &= ~CPUID_EXT3_SVM;
        } else {
            /* AMD 3DNow! is not supported in QEMU */
            *edx &= ~(CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT);
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf across three leaves. */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        *eax = 0x01ff01ff;
        *ebx = 0x01ff01ff;
        *ecx = 0x40020140;
        *edx = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        *eax = 0;
        *ebx = 0x42004200;
        *ecx = 0x02008140;
        *edx = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
/* XXX: This value must match the one used in the MMU code. */ 
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
#if defined(CONFIG_KQEMU)
            *eax = 0x00003020;        /* 48 bits virtual, 32 bits physical */
#else
/* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028;        /* 48 bits virtual, 40 bits physical */
#endif
        } else {
#if defined(CONFIG_KQEMU)
            *eax = 0x00000020;        /* 32 bits physical */
#else
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
#endif
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000000A:
        /* SVM capability leaf. */
        *eax = 0x00000001; /* SVM Revision */
        *ebx = 0x00000010; /* nr of ASIDs */
        *ecx = 0;
        *edx = 0; /* optional features */
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
1781

    
1782

    
1783
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
1784
                            target_ulong *base, unsigned int *limit,
1785
                            unsigned int *flags)
1786
{
1787
    SegmentCache *dt;
1788
    target_ulong ptr;
1789
    uint32_t e1, e2;
1790
    int index;
1791

    
1792
    if (selector & 0x4)
1793
        dt = &env->ldt;
1794
    else
1795
        dt = &env->gdt;
1796
    index = selector & ~7;
1797
    ptr = dt->base + index;
1798
    if ((index + 7) > dt->limit
1799
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
1800
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
1801
        return 0;
1802

    
1803
    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
1804
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
1805
    if (e2 & DESC_G_MASK)
1806
        *limit = (*limit << 12) | 0xfff;
1807
    *flags = e2;
1808

    
1809
    return 1;
1810
}
1811

    
1812
/* Allocate and initialize a new x86 CPU state.
 *
 * 'cpu_model' names the CPU definition to instantiate; returns the new
 * env, or NULL if the model name is not recognized by
 * cpu_x86_register().  The first call also performs one-time global
 * initialization (translator flag tables, debug-exception handler). */
CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;   /* guards the one-time global setup below */

    env = qemu_mallocz(sizeof(CPUX86State));
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        /* Chain our breakpoint handler in front of the previous one. */
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    /* Set up MCE banks (no-op unless CPUID advertises MCE+MCA),
     * then bring the CPU to its power-on state. */
    mce_init(env);
    cpu_reset(env);
#ifdef CONFIG_KQEMU
    kqemu_init(env);
#endif

    qemu_init_vcpu(env);

    return env;
}
1844

    
1845
#if !defined(CONFIG_USER_ONLY)
1846
/* Handle an INIT IPI: reset the CPU core and its local APIC state.
 * A SIPI that is already pending must survive the reset (cpu_reset
 * clears interrupt_request), so the flag is saved and restored. */
void do_cpu_init(CPUState *env)
{
    int pending_sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;

    cpu_reset(env);
    env->interrupt_request = pending_sipi;
    apic_init_reset(env);
}
1853

    
1854
/* Handle a Startup IPI by forwarding it to the local APIC emulation. */
void do_cpu_sipi(CPUState *env)
{
    apic_sipi(env);
}
1858
#else
1859
/* User-mode emulation stub: there is no APIC, so INIT is a no-op. */
void do_cpu_init(CPUState *env)
{
}
1862
/* User-mode emulation stub: there is no APIC, so SIPI is a no-op. */
void do_cpu_sipi(CPUState *env)
{
}
1865
#endif