Statistics
| Branch: | Revision:

root / target-i386 / helper.c @ 09ac35ac

History | View | Annotate | Download (60.4 kB)

1
/*
2
 *  i386 helpers (without register variable usage)
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include <stdarg.h>
20
#include <stdlib.h>
21
#include <stdio.h>
22
#include <string.h>
23
#include <inttypes.h>
24
#include <signal.h>
25

    
26
#include "cpu.h"
27
#include "exec-all.h"
28
#include "qemu-common.h"
29
#include "kvm.h"
30

    
31
//#define DEBUG_MMU
32

    
33
/* feature flags taken from "Intel Processor Identification and the CPUID
34
 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
35
 * about feature names, the Linux name is used. */
36
/* CPUID.1:EDX feature bits, indexed by bit position 0..31.
   NULL marks reserved bits with no recognized flag name. */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
/* CPUID.1:ECX feature bits. */
static const char *ext_feature_name[] = {
    "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
    "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
    NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, "hypervisor",
};
/* CPUID.0x80000001:EDX feature bits (AMD extended; overlaps the
   standard leaf for the low bits). */
static const char *ext2_feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
    "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
};
/* CPUID.0x80000001:ECX feature bits. */
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
60

    
61
static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
62
                                    uint32_t *ext_features,
63
                                    uint32_t *ext2_features,
64
                                    uint32_t *ext3_features)
65
{
66
    int i;
67
    int found = 0;
68

    
69
    for ( i = 0 ; i < 32 ; i++ )
70
        if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
71
            *features |= 1 << i;
72
            found = 1;
73
        }
74
    for ( i = 0 ; i < 32 ; i++ )
75
        if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
76
            *ext_features |= 1 << i;
77
            found = 1;
78
        }
79
    for ( i = 0 ; i < 32 ; i++ )
80
        if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
81
            *ext2_features |= 1 << i;
82
            found = 1;
83
        }
84
    for ( i = 0 ; i < 32 ; i++ )
85
        if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
86
            *ext3_features |= 1 << i;
87
            found = 1;
88
        }
89
    if (!found) {
90
        fprintf(stderr, "CPU feature %s not found\n", flagname);
91
    }
92
}
93

    
94
/* One built-in CPU model: everything needed to populate the
   CPUID-related fields of the CPU state in cpu_x86_register(). */
typedef struct x86_def_t {
    const char *name;                    /* model name as given to -cpu */
    uint32_t level;                      /* maximum standard CPUID leaf */
    uint32_t vendor1, vendor2, vendor3;  /* vendor string words in EBX/EDX/ECX
                                            order (see cpu_x86_fill_host) */
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;                     /* maximum extended CPUID leaf */
    char model_id[48];                   /* brand string for CPUID leaves
                                            0x80000002..0x80000004 */
    int vendor_override;                 /* non-zero if the user supplied an
                                            explicit vendor= string */
} x86_def_t;
106

    
107
/* Cumulative CPUID.1:EDX feature sets for the historical Intel models
   below; each generation extends the previous one. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
/* Baseline used by the qemu32/qemu64 models and most others below. */
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
118
/* Table of built-in CPU models selectable with "-cpu <name>".
   Looked up by name in cpu_x86_find_by_name(); "host" is handled
   separately there.  64-bit-capable models are only available when the
   target is x86_64. */
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        /* 0x0183F3FF masks PPRO_FEATURES down to the bits that also
           exist in the AMD extended leaf */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "phenom",
        .level = 5,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 16,
        .model = 2,
        .stepping = 3,
        /* Missing: CPUID_VME, CPUID_HT */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* Missing: CPUID_EXT_CX16, CPUID_EXT_POPCNT */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
    },
    {
        .name = "core2duo",
        .level = 10,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* The original CPU also implements these features:
               CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* The original CPU also implements these ext features:
               CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
               CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU     T7700  @ 2.40GHz",
    },
#endif
    {
        .name = "qemu32",
        .level = 2,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3,
        .xlevel = 0,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "coreduo",
        .level = 10,
        .family = 6,
        .model = 14,
        .stepping = 8,
        /* The original CPU also implements these features:
               CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
        /* The original CPU also implements these ext features:
               CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
               CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .ext2_features = CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU           T2600  @ 2.16GHz",
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        /* XXX: put another string ? */
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "n270",
        /* original is on level 10 */
        .level = 5,
        .family = 6,
        .model = 28,
        .stepping = 2,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
            /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
             * CPUID_HT | CPUID_TM | CPUID_PBE */
            /* Some CPUs got no CPUID_SEP */
        .ext_features = CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
            /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
             * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270   @ 1.60GHz",
    },
};
294

    
295
static void host_cpuid(uint32_t function, uint32_t count, uint32_t *eax,
296
                               uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
297

    
298
/* Read the host CPU brand string (CPUID leaves 0x80000002..0x80000004)
 * into @str.  @str must have room for 48 bytes (3 leaves x 4 registers
 * x 4 bytes), i.e. the size of x86_def_t.model_id.  Always returns 0. */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4];
    int leaf, r;

    for (leaf = 0; leaf < 3; leaf++) {
        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        /* The brand string is laid out EAX, EBX, ECX, EDX per leaf. */
        for (r = 0; r < 4; r++) {
            memcpy(str + leaf * 16 + r * 4, &regs[r], 4);
        }
    }
    return 0;
}
312

    
313
/* Populate @x86_cpu_def from the host CPU's own CPUID data, so that
 * "-cpu host" exposes the host's identification and feature bits to the
 * guest.  Always returns 0. */
static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
{
    uint32_t a = 0, b = 0, c = 0, d = 0;

    x86_cpu_def->name = "host";

    /* Leaf 0: highest standard leaf plus the vendor string, which is
     * stored in EBX:EDX:ECX order. */
    host_cpuid(0x0, 0, &a, &b, &c, &d);
    x86_cpu_def->level = a;
    x86_cpu_def->vendor1 = b;
    x86_cpu_def->vendor2 = d;
    x86_cpu_def->vendor3 = c;

    /* Leaf 1: version info (family/model/stepping packed in EAX) and
     * the standard feature words. */
    host_cpuid(0x1, 0, &a, &b, &c, &d);
    x86_cpu_def->family = ((a >> 8) & 0x0F) + ((a >> 20) & 0xFF);
    x86_cpu_def->model = ((a >> 4) & 0x0F) | ((a & 0xF0000) >> 12);
    x86_cpu_def->stepping = a & 0x0F;
    x86_cpu_def->features = d;
    x86_cpu_def->ext_features = c;

    /* Leaf 0x80000000: highest extended leaf. */
    host_cpuid(0x80000000, 0, &a, &b, &c, &d);
    x86_cpu_def->xlevel = a;

    /* Leaf 0x80000001: extended feature words. */
    host_cpuid(0x80000001, 0, &a, &b, &c, &d);
    x86_cpu_def->ext2_features = d;
    x86_cpu_def->ext3_features = c;

    cpu_x86_fill_model_id(x86_cpu_def->model_id);
    x86_cpu_def->vendor_override = 0;

    return 0;
}
342

    
343
/* Parse a "-cpu" model string of the form
 *     name[,+flag][,-flag][,prop=value]...
 * into @x86_cpu_def.  "name" is either a built-in model from x86_defs[]
 * or "host" (filled from the host CPU via cpu_x86_fill_host).  "+flag"
 * forces a feature bit on, "-flag" forces it off; recognized properties
 * are family, model, stepping, vendor and model_id.
 * Returns 0 on success, -1 on any error (a message goes to stderr).
 * NOTE: uses strtok(), so this is not reentrant.
 */
static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    char *s = strdup(cpu_model);
    char *featurestr, *name;
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
    int family = -1, model = -1, stepping = -1;

    /* strdup can fail on OOM; without this check strtok would crash. */
    if (!s) {
        return -1;
    }
    /* An empty model string makes strtok return NULL; reject it rather
     * than passing NULL to strcmp below. */
    name = strtok(s, ",");
    if (!name) {
        goto error;
    }

    def = NULL;
    for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
        if (strcmp(name, x86_defs[i].name) == 0) {
            def = &x86_defs[i];
            break;
        }
    }
    if (!def) {
        if (strcmp(name, "host") != 0) {
            goto error;
        }
        cpu_x86_fill_host(x86_cpu_def);
    } else {
        memcpy(x86_cpu_def, def, sizeof(*def));
    }

    /* Always advertise the hypervisor bit so guests can tell they are
     * virtualized. */
    add_flagname_to_bitmaps("hypervisor", &plus_features,
        &plus_ext_features, &plus_ext2_features, &plus_ext3_features);

    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0; val++;
            if (!strcmp(featurestr, "family")) {
                char *err;
                family = strtol(val, &err, 10);
                if (!*val || *err || family < 0) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = family;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                model = strtol(val, &err, 10);
                if (!*val || *err || model < 0 || model > 0xff) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = model;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                stepping = strtol(val, &err, 10);
                if (!*val || *err || stepping < 0 || stepping > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = stepping;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                /* Pack the 12-character vendor string into the three
                 * registers, 4 bytes each, little-endian. */
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
                for(i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i    ]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
                x86_cpu_def->vendor_override = 1;
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    /* Apply the accumulated overrides: forced-off wins over forced-on. */
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    free(s);
    return 0;

error:
    free(s);
    return -1;
}
449

    
450
void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
451
{
452
    unsigned int i;
453

    
454
    for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
455
        (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
456
}
457

    
458
/* Resolve @cpu_model and load the resulting definition into the
 * CPUID-related fields of @env.  Returns 0 on success, -1 if the model
 * string is unknown or malformed. */
static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        /* Models with no explicit vendor default to GenuineIntel. */
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_vendor_override = def->vendor_override;
    env->cpuid_level = def->level;
    /* Encode family/model/stepping in the CPUID leaf 1 EAX layout.
     * Families above 0x0f go into the extended-family field
     * (bits 20-27) with the base family field held at 0xf. */
    if (def->family > 0x0f)
        env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
    else
        env->cpuid_version = def->family << 8;
    /* Low model nibble in bits 4-7, high nibble in the extended-model
     * field at bits 16-19; stepping in bits 0-3. */
    env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
    env->cpuid_version |= def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;  /* default PAT value -- presumably the
                                          architectural reset default; confirm
                                          against the SDM */
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
    {
        /* Pack the model-id string into the 12 32-bit words returned by
         * CPUID leaves 0x80000002..4, NUL-padding out to 48 bytes. */
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
    return 0;
}
503

    
504
/* Reset the CPU to its architectural power-on state.
   NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    /* Zero only the fields before 'breakpoints'; everything at or after
       that offset (breakpoints, watchpoints, ...) survives reset. */
    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    /* CR0 reset value -- presumably the architectural 0x60000010
       power-on value; confirm against the SDM. */
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;      /* A20 gate enabled */
    env->smbase = 0x30000;     /* default SMRAM base */

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* Real-mode segments: CS starts at 0xf000 with base 0xffff0000 so
       that CS:IP = f000:fff0 points at the top-of-memory reset vector. */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;  /* EDX holds the CPU signature
                                               after reset */

    env->eflags = 0x2;  /* only the always-set reserved bit 1 */

    /* FPU init: all tag bits set = registers empty */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;  /* MXCSR default: all exceptions masked */

    /* Debug registers: clear DR0-3, set the architecturally fixed bits
       in DR6/DR7, and drop any CPU-originated break/watchpoints. */
    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
}
575

    
576
/* Free the CPU state structure.  @env must not be used afterwards.
   NOTE(review): presumably pairs with the allocation done in
   cpu_x86_init() -- confirm against the allocator. */
void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}
580

    
581
/***********************************************************/
582
/* x86 debug */
583

    
584
/* Printable names for the lazy condition-code operation states, used
 * by cpu_dump_state().  Entry order must match the CC_OP_* enumeration
 * (presumably defined in cpu.h) -- keep the two in sync. */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
638

    
639
static void
640
cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
641
                       int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
642
                       const char *name, struct SegmentCache *sc)
643
{
644
#ifdef TARGET_X86_64
645
    if (env->hflags & HF_CS64_MASK) {
646
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
647
                    sc->selector, sc->base, sc->limit, sc->flags);
648
    } else
649
#endif
650
    {
651
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
652
                    (uint32_t)sc->base, sc->limit, sc->flags);
653
    }
654

    
655
    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
656
        goto done;
657

    
658
    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
659
    if (sc->flags & DESC_S_MASK) {
660
        if (sc->flags & DESC_CS_MASK) {
661
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
662
                           ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
663
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
664
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
665
        } else {
666
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
667
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
668
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
669
        }
670
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
671
    } else {
672
        static const char *sys_type_name[2][16] = {
673
            { /* 32 bit mode */
674
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
675
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
676
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
677
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
678
            },
679
            { /* 64 bit mode */
680
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
681
                "Reserved", "Reserved", "Reserved", "Reserved",
682
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
683
                "Reserved", "IntGate64", "TrapGate64"
684
            }
685
        };
686
        cpu_fprintf(f, sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
687
                                    [(sc->flags & DESC_TYPE_MASK)
688
                                     >> DESC_TYPE_SHIFT]);
689
    }
690
done:
691
    cpu_fprintf(f, "\n");
692
}
693

    
694
void cpu_dump_state(CPUState *env, FILE *f,
695
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
696
                    int flags)
697
{
698
    int eflags, i, nb;
699
    char cc_op_name[32];
700
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
701

    
702
    if (kvm_enabled())
703
        kvm_arch_get_registers(env);
704

    
705
    eflags = env->eflags;
706
#ifdef TARGET_X86_64
707
    if (env->hflags & HF_CS64_MASK) {
708
        cpu_fprintf(f,
709
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
710
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
711
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
712
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
713
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
714
                    env->regs[R_EAX],
715
                    env->regs[R_EBX],
716
                    env->regs[R_ECX],
717
                    env->regs[R_EDX],
718
                    env->regs[R_ESI],
719
                    env->regs[R_EDI],
720
                    env->regs[R_EBP],
721
                    env->regs[R_ESP],
722
                    env->regs[8],
723
                    env->regs[9],
724
                    env->regs[10],
725
                    env->regs[11],
726
                    env->regs[12],
727
                    env->regs[13],
728
                    env->regs[14],
729
                    env->regs[15],
730
                    env->eip, eflags,
731
                    eflags & DF_MASK ? 'D' : '-',
732
                    eflags & CC_O ? 'O' : '-',
733
                    eflags & CC_S ? 'S' : '-',
734
                    eflags & CC_Z ? 'Z' : '-',
735
                    eflags & CC_A ? 'A' : '-',
736
                    eflags & CC_P ? 'P' : '-',
737
                    eflags & CC_C ? 'C' : '-',
738
                    env->hflags & HF_CPL_MASK,
739
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
740
                    (int)(env->a20_mask >> 20) & 1,
741
                    (env->hflags >> HF_SMM_SHIFT) & 1,
742
                    env->halted);
743
    } else
744
#endif
745
    {
746
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
747
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
748
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
749
                    (uint32_t)env->regs[R_EAX],
750
                    (uint32_t)env->regs[R_EBX],
751
                    (uint32_t)env->regs[R_ECX],
752
                    (uint32_t)env->regs[R_EDX],
753
                    (uint32_t)env->regs[R_ESI],
754
                    (uint32_t)env->regs[R_EDI],
755
                    (uint32_t)env->regs[R_EBP],
756
                    (uint32_t)env->regs[R_ESP],
757
                    (uint32_t)env->eip, eflags,
758
                    eflags & DF_MASK ? 'D' : '-',
759
                    eflags & CC_O ? 'O' : '-',
760
                    eflags & CC_S ? 'S' : '-',
761
                    eflags & CC_Z ? 'Z' : '-',
762
                    eflags & CC_A ? 'A' : '-',
763
                    eflags & CC_P ? 'P' : '-',
764
                    eflags & CC_C ? 'C' : '-',
765
                    env->hflags & HF_CPL_MASK,
766
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
767
                    (int)(env->a20_mask >> 20) & 1,
768
                    (env->hflags >> HF_SMM_SHIFT) & 1,
769
                    env->halted);
770
    }
771

    
772
    for(i = 0; i < 6; i++) {
773
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
774
                               &env->segs[i]);
775
    }
776
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
777
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);
778

    
779
#ifdef TARGET_X86_64
780
    if (env->hflags & HF_LMA_MASK) {
781
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
782
                    env->gdt.base, env->gdt.limit);
783
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
784
                    env->idt.base, env->idt.limit);
785
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
786
                    (uint32_t)env->cr[0],
787
                    env->cr[2],
788
                    env->cr[3],
789
                    (uint32_t)env->cr[4]);
790
        for(i = 0; i < 4; i++)
791
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
792
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
793
                    env->dr[6], env->dr[7]);
794
    } else
795
#endif
796
    {
797
        cpu_fprintf(f, "GDT=     %08x %08x\n",
798
                    (uint32_t)env->gdt.base, env->gdt.limit);
799
        cpu_fprintf(f, "IDT=     %08x %08x\n",
800
                    (uint32_t)env->idt.base, env->idt.limit);
801
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
802
                    (uint32_t)env->cr[0],
803
                    (uint32_t)env->cr[2],
804
                    (uint32_t)env->cr[3],
805
                    (uint32_t)env->cr[4]);
806
        for(i = 0; i < 4; i++)
807
            cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]);
808
        cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->dr[7]);
809
    }
810
    if (flags & X86_DUMP_CCOP) {
811
        if ((unsigned)env->cc_op < CC_OP_NB)
812
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
813
        else
814
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
815
#ifdef TARGET_X86_64
816
        if (env->hflags & HF_CS64_MASK) {
817
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
818
                        env->cc_src, env->cc_dst,
819
                        cc_op_name);
820
        } else
821
#endif
822
        {
823
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
824
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
825
                        cc_op_name);
826
        }
827
    }
828
    if (flags & X86_DUMP_FPU) {
829
        int fptag;
830
        fptag = 0;
831
        for(i = 0; i < 8; i++) {
832
            fptag |= ((!env->fptags[i]) << i);
833
        }
834
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
835
                    env->fpuc,
836
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
837
                    env->fpstt,
838
                    fptag,
839
                    env->mxcsr);
840
        for(i=0;i<8;i++) {
841
#if defined(USE_X86LDOUBLE)
842
            union {
843
                long double d;
844
                struct {
845
                    uint64_t lower;
846
                    uint16_t upper;
847
                } l;
848
            } tmp;
849
            tmp.d = env->fpregs[i].d;
850
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
851
                        i, tmp.l.lower, tmp.l.upper);
852
#else
853
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
854
                        i, env->fpregs[i].mmx.q);
855
#endif
856
            if ((i & 1) == 1)
857
                cpu_fprintf(f, "\n");
858
            else
859
                cpu_fprintf(f, " ");
860
        }
861
        if (env->hflags & HF_CS64_MASK)
862
            nb = 16;
863
        else
864
            nb = 8;
865
        for(i=0;i<nb;i++) {
866
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
867
                        i,
868
                        env->xmm_regs[i].XMM_L(3),
869
                        env->xmm_regs[i].XMM_L(2),
870
                        env->xmm_regs[i].XMM_L(1),
871
                        env->xmm_regs[i].XMM_L(0));
872
            if ((i & 1) == 1)
873
                cpu_fprintf(f, "\n");
874
            else
875
                cpu_fprintf(f, " ");
876
        }
877
    }
878
}
879

    
880
/***********************************************************/
881
/* x86 mmu */
882
/* XXX: add PGE support */
883

    
884
/* Update the state of the A20 address-line gate.  A20 masking changes
   which physical addresses the CPU can see, so all cached translations
   (TBs and TLB) must be dropped when it toggles. */
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    /* normalize to 0/1 before comparing against the stored bit */
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        /* a20_mask has all bits set except bit 20, which mirrors the
           gate state */
        env->a20_mask = (~0x100000) | (a20_state << 20);
    }
}
901

    
902
/* Install a new CR0 value, handling the side effects visible in this
   function: TLB flush on paging/protection changes, long-mode entry and
   exit (x86-64), and the cached PE/ADDSEG/FPU bits kept in hflags. */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* any change to PG, WP or PE invalidates all cached translations */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    /* enabling paging with EFER.LME set activates long mode (LMA) */
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        /* EIP is truncated to 32 bits on leaving long mode */
        env->eip &= 0xffffffff;
    }
#endif
    /* ET is hardwired to 1 on these CPUs */
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags: CR0.MP/EM/TS are mirrored into hflags by a
       single shift (HF_MP_SHIFT - 1 lines the three bits up) */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
942

    
943
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
944
   the PDPT */
945
/* Install a new page-table base (CR3).  When paging is enabled this
   invalidates the non-global TLB entries; with paging off it only
   latches the register value. */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    int paging_enabled;

    env->cr[3] = new_cr3;
    paging_enabled = (env->cr[0] & CR0_PG_MASK) != 0;
    if (!paging_enabled)
        return;
#if defined(DEBUG_MMU)
    printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
    /* flush only non-global mappings (second argument is 0) */
    tlb_flush(env, 0);
}
955

    
956
/* Install a new CR4 value: flush the TLB when any paging-related bit
   (PGE/PAE/PSE) changes, and keep the cached OSFXSR bit in hflags in
   sync. */
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling: OSFXSR cannot be set on a CPU without SSE */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    /* mirror OSFXSR into hflags for fast access by the translator */
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}
975

    
976
#if defined(CONFIG_USER_ONLY)
977

    
978
/* User-mode emulation: there is no guest MMU, so every fault is
   reported straight to the guest as a user-level page fault on the
   faulting address.  Always returns 1 ("generate PF fault"). */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* only the low bit of is_write distinguishes read from write */
    env->cr[2] = addr;
    env->error_code = ((is_write & 1) << PG_ERROR_W_BIT) | PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
989

    
990
/* User-mode emulation: virtual and physical addresses are identical,
   so the debug translation is the identity. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
994

    
995
#else
996

    
997
/* XXX: This value should match the one returned by CPUID
998
 * and in exec.c */
999
#if defined(CONFIG_KQEMU)
1000
#define PHYS_ADDR_MASK 0xfffff000LL
1001
#else
1002
# if defined(TARGET_X86_64)
1003
# define PHYS_ADDR_MASK 0xfffffff000LL
1004
# else
1005
# define PHYS_ADDR_MASK 0xffffff000LL
1006
# endif
1007
#endif
1008

    
1009
/* return value:
1010
   -1 = cannot handle fault
1011
   0  = nothing more to do
1012
   1  = generate PF fault
1013
   2  = soft MMU activation required for this block
1014
*/
1015
/* Walk the guest page tables for 'addr' and install the translation in
   the soft TLB.  Three walk formats are handled: paging disabled
   (identity), PAE/long mode (64-bit entries, NX), and legacy 32-bit
   (optionally 4MB PSE pages).  is_write1 encodes the access: bit 0 is
   write, value 2 means instruction fetch (checked against NX).
   Accessed/Dirty bits are set during the walk; write permission is only
   granted once the page is dirty so the first write still faults into
   this function. */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    /* paging disabled: identity mapping, full access */
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                /* non-canonical address: #GP, not #PF */
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* NX set while EFER.NXE is off is a reserved-bit fault */
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                /* A/D bits live in the low 32 bits, so a 32-bit store
                   suffices and leaves the high half untouched */
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            /* ptep accumulates combined protections; NX is inverted so
               that AND-ing levels yields "NX if any level sets it" */
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* legacy PAE: 4-entry PDPT, entries carry no U/W/NX bits */
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* start from "everything allowed" for the missing level */
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            /* undo the inversion: ptep now holds real NX polarity */
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                /* supervisor writes honour R/W only when CR0.WP is set */
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* legacy 32-bit paging */
        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    /* instruction fetch with NX active reports the I/D bit */
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
1304

    
1305
/* Translate a guest virtual address to a physical address for debugger
   use.  Performs a read-only page walk (no A/D updates, no permission
   checks, no fault injection) and returns -1 when the address is not
   mapped or not canonical. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            /* legacy PAE: 4-entry PDPT selected by address bits 31:30 */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            /* paging disabled: identity mapping */
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                /* 4MB PSE page: clear the middle address bits, keep the
                   low flag bits (masked off below via TARGET_PAGE_MASK) */
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page directory entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    /* recombine the 4KB-granular physical page with the page offset */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
1398

    
1399
/* Register debug register DR0..DR3 slot 'index' with the generic
   breakpoint/watchpoint machinery, according to its type bits in DR7:
   0 = execution breakpoint, 1 = data write, 2 = I/O (unsupported),
   3 = data read/write.  On failure the cached pointer is cleared. */
void hw_breakpoint_insert(CPUState *env, int index)
{
    int type, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
         /* No support for I/O watchpoints yet */
        break;
    case 3:
        type = BP_CPU | BP_MEM_ACCESS;
        /* fall through to shared watchpoint insertion */
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    /* NOTE(review): cpu_breakpoint and cpu_watchpoint presumably alias
       (union or overlapping storage) since only the breakpoint slot is
       cleared here for both paths — confirm against CPUState. */
    if (err)
        env->cpu_breakpoint[index] = NULL;
}
1426

    
1427
/* Undo hw_breakpoint_insert() for DR slot 'index'.  A NULL cached
   pointer means nothing was registered, so there is nothing to do. */
void hw_breakpoint_remove(CPUState *env, int index)
{
    int type;

    if (!env->cpu_breakpoint[index])
        return;

    type = hw_breakpoint_type(env->dr[7], index);
    if (type == 0) {
        /* execution breakpoint — only registered while enabled */
        if (hw_breakpoint_enabled(env->dr[7], index))
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
    } else if (type == 1 || type == 3) {
        /* data write / data access watchpoint */
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
    }
    /* type == 2: I/O watchpoints are not supported, nothing registered */
}
1445

    
1446
/* Recompute the DR6 hit bits (B0..B3) from the current EIP and the
   watchpoint-hit flags.  Returns non-zero when an *enabled* breakpoint
   was hit.  DR6 is written back only on an enabled hit, or always when
   force_dr6_update is set. */
int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong dr6;
    int reg, type;
    int hit_enabled = 0;

    /* start from DR6 with the four hit bits cleared */
    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < 4; reg++) {
        type = hw_breakpoint_type(env->dr[7], reg);
        /* type 0 matches on EIP; data types (bit 0 set) match when the
           corresponding watchpoint recorded a hit */
        if ((type == 0 && env->dr[reg] == env->eip) ||
            ((type & 1) && env->cpu_watchpoint[reg] &&
             (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg))
                hit_enabled = 1;
        }
    }
    if (hit_enabled || force_dr6_update)
        env->dr[6] = dr6;
    return hit_enabled;
}
1467

    
1468
static CPUDebugExcpHandler *prev_debug_excp_handler;
1469

    
1470
void raise_exception(int exception_index);
1471

    
1472
/* Debug-exception hook: decides whether a breakpoint/watchpoint stop
   belongs to the guest (BP_CPU — raise #DB) or to the host debugger
   (delegate to the previously installed handler).  Note that both
   raise_exception() and cpu_resume_from_signal() do not return. */
static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            /* guest-owned watchpoint: clear the hit marker first, then
               either inject #DB or silently resume if DR7 disables it */
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception(EXCP01_DB);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        /* find the breakpoint matching the current EIP */
        TAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception(EXCP01_DB);
                }
                break;
            }
    }
    /* not a guest debug event: chain to the previous handler */
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}
1497

    
1498
/* This should come from sysemu.h - if we could include it here... */
1499
void qemu_system_reset_request(void);
1500

    
1501
/* Inject a machine-check event into MCE bank 'bank'.  Uncorrected (UC)
   errors raise a machine-check exception unless MCE delivery is
   impossible (MCIP already set or CR4.MCE clear), which is treated as a
   triple fault (system reset).  Corrected errors are only logged into
   the bank registers; an occupied bank sets the OVER (overflow) bit. */
void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc)
{
    uint64_t mcg_cap = cenv->mcg_cap;
    unsigned bank_num = mcg_cap & 0xff;  /* low byte of MCG_CAP = bank count */
    uint64_t *banks = cenv->mce_banks;

    /* ignore out-of-range banks and statuses without the VAL bit */
    if (bank >= bank_num || !(status & MCI_STATUS_VAL))
        return;

    /*
     * if MSR_MCG_CTL is not all 1s, the uncorrected error
     * reporting is disabled
     */
    if ((status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
        cenv->mcg_ctl != ~(uint64_t)0)
        return;
    /* each bank occupies 4 consecutive uint64_t: CTL, STATUS, ADDR, MISC */
    banks += 4 * bank;
    /*
     * if MSR_MCi_CTL is not all 1s, the uncorrected error
     * reporting is disabled for the bank
     */
    if ((status & MCI_STATUS_UC) && banks[0] != ~(uint64_t)0)
        return;
    if (status & MCI_STATUS_UC) {
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            /* cannot deliver a second #MC: this is a shutdown condition */
            fprintf(stderr, "injects mce exception while previous "
                    "one is in progress!\n");
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        cenv->mcg_status = mcg_status;
        banks[1] = status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        /* corrected error: log only, overwriting a prior corrected one */
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        banks[1] = status;
    } else
        /* bank holds an unprocessed UC error: just mark overflow */
        banks[1] |= MCI_STATUS_OVER;
}
1551
#endif /* !CONFIG_USER_ONLY */
1552

    
1553
/* Initialize machine-check state for CPUs that advertise MCE+MCA and a
   family >= 6: set default MCG_CAP/MCG_CTL and allocate the bank
   registers (4 uint64_t per bank) with each MCi_CTL set to all 1s. */
static void mce_init(CPUX86State *cenv)
{
    unsigned int bank, bank_num;

    /* bits 11:8 of cpuid_version hold the family */
    if (((cenv->cpuid_version >> 8)&0xf) >= 6
        && (cenv->cpuid_features&(CPUID_MCE|CPUID_MCA)) == (CPUID_MCE|CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
        cenv->mcg_ctl = ~(uint64_t)0;
        bank_num = cenv->mcg_cap & 0xff;
        cenv->mce_banks = qemu_mallocz(bank_num * sizeof(uint64_t) * 4);
        /* enable reporting in every bank (MCi_CTL = all 1s) */
        for (bank = 0; bank < bank_num; bank++)
            cenv->mce_banks[bank*4] = ~(uint64_t)0;
    }
}
1567

    
1568
/* Execute the CPUID instruction on the *host* CPU (only when built with
   KVM support) and return the selected registers.  NULL output pointers
   are skipped.  Without CONFIG_KVM this compiles to a no-op, leaving the
   outputs untouched. */
static void host_cpuid(uint32_t function, uint32_t count,
                       uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#else
    /* 32-bit: EBX may be the PIC register, so save/restore everything
       with pusha/popa and store results through a pointer instead of
       using register output constraints */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
#endif
}
1602

    
1603
/* Implement the guest-visible CPUID instruction: fill in eax/ebx/ecx/edx
 * for leaf 'index' (and sub-leaf 'count', used by leaf 4).  All four
 * output pointers must be non-NULL. */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    /* Clamp out-of-range leaves.  Requests above the advertised maximum
     * (basic or extended range) fall back to the highest *basic* leaf,
     * matching hardware behavior for out-of-range extended leaves. */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Vendor identification leaf. */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;

        /* sysenter isn't supported in compatibility mode on AMD and syscall
         * isn't supported in compatibility mode on Intel, so advertise the
         * actual host CPU vendor — and say goodbye to migration between
         * different vendors if you use compatibility mode. */
        if (kvm_enabled() && !env->cpuid_vendor_override)
            host_cpuid(0, 0, NULL, ebx, ecx, edx);
        break;
    case 1:
        /* Version/feature leaf. */
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words (8 * 8 = 64 bytes); Linux wants it. */
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        *eax = 1;
        *ebx = 0;
        *ecx = 0;
        *edx = 0x2c307d;
        break;
    case 4:
        /* Deterministic cache parameters, indexed by ecx sub-leaf:
         * needed for Core compatibility. */
        switch (count) {
            case 0: /* L1 dcache info */
                *eax = 0x0000121;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 1: /* L1 icache info */
                *eax = 0x0000122;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 2: /* L2 cache info */
                *eax = 0x0000143;
                *ebx = 0x3c0003f;
                *ecx = 0x0000fff;
                *edx = 0x0000001;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf: nothing advertised. */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 9:
        /* Direct Cache Access Information Leaf: nothing advertised. */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf: nothing advertised. */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        /* Maximum extended leaf + vendor string again. */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        /* Extended version/feature leaf. */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        if (kvm_enabled()) {
            /* Nested SVM not yet supported in KVM */
            *ecx &= ~CPUID_EXT3_SVM;
        } else {
            /* AMD 3DNow! is not supported in QEMU */
            *edx &= ~(CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT);
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf across 3 leaves. */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        *eax = 0x01ff01ff;
        *ebx = 0x01ff01ff;
        *ecx = 0x40020140;
        *edx = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        *eax = 0;
        *ebx = 0x42004200;
        *ecx = 0x02008140;
        *edx = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
/* XXX: This value must match the one used in the MMU code. */ 
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
#if defined(CONFIG_KQEMU)
            *eax = 0x00003020;        /* 48 bits virtual, 32 bits physical */
#else
/* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028;        /* 48 bits virtual, 40 bits physical */
#endif
        } else {
#if defined(CONFIG_KQEMU)
            *eax = 0x00000020;        /* 32 bits physical */
#else
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
#endif
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000000A:
        /* SVM capability leaf. */
        *eax = 0x00000001; /* SVM Revision */
        *ebx = 0x00000010; /* nr of ASIDs */
        *ecx = 0;
        *edx = 0; /* optional features */
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
1782

    
1783

    
1784
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
1785
                            target_ulong *base, unsigned int *limit,
1786
                            unsigned int *flags)
1787
{
1788
    SegmentCache *dt;
1789
    target_ulong ptr;
1790
    uint32_t e1, e2;
1791
    int index;
1792

    
1793
    if (selector & 0x4)
1794
        dt = &env->ldt;
1795
    else
1796
        dt = &env->gdt;
1797
    index = selector & ~7;
1798
    ptr = dt->base + index;
1799
    if ((index + 7) > dt->limit
1800
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
1801
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
1802
        return 0;
1803

    
1804
    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
1805
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
1806
    if (e2 & DESC_G_MASK)
1807
        *limit = (*limit << 12) | 0xfff;
1808
    *flags = e2;
1809

    
1810
    return 1;
1811
}
1812

    
1813
CPUX86State *cpu_x86_init(const char *cpu_model)
1814
{
1815
    CPUX86State *env;
1816
    static int inited;
1817

    
1818
    env = qemu_mallocz(sizeof(CPUX86State));
1819
    cpu_exec_init(env);
1820
    env->cpu_model_str = cpu_model;
1821

    
1822
    /* init various static tables */
1823
    if (!inited) {
1824
        inited = 1;
1825
        optimize_flags_init();
1826
#ifndef CONFIG_USER_ONLY
1827
        prev_debug_excp_handler =
1828
            cpu_set_debug_excp_handler(breakpoint_handler);
1829
#endif
1830
    }
1831
    if (cpu_x86_register(env, cpu_model) < 0) {
1832
        cpu_x86_close(env);
1833
        return NULL;
1834
    }
1835
    mce_init(env);
1836
    cpu_reset(env);
1837
#ifdef CONFIG_KQEMU
1838
    kqemu_init(env);
1839
#endif
1840

    
1841
    qemu_init_vcpu(env);
1842

    
1843
    return env;
1844
}
1845

    
1846
#if !defined(CONFIG_USER_ONLY)
1847
void do_cpu_init(CPUState *env)
1848
{
1849
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
1850
    cpu_reset(env);
1851
    env->interrupt_request = sipi;
1852
    apic_init_reset(env);
1853
}
1854

    
1855
void do_cpu_sipi(CPUState *env)
1856
{
1857
    apic_sipi(env);
1858
}
1859
#else
1860
void do_cpu_init(CPUState *env)
1861
{
1862
}
1863
void do_cpu_sipi(CPUState *env)
1864
{
1865
}
1866
#endif