Statistics
| Branch: | Revision:

root / target-i386 / helper.c @ b5ec5ce0

History | View | Annotate | Download (74.6 kB)

1
/*
2
 *  i386 helpers (without register variable usage)
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include <stdarg.h>
20
#include <stdlib.h>
21
#include <stdio.h>
22
#include <string.h>
23
#include <inttypes.h>
24
#include <signal.h>
25

    
26
#include "cpu.h"
27
#include "exec-all.h"
28
#include "qemu-common.h"
29
#include "kvm.h"
30

    
31
//#define DEBUG_MMU
32
#include "qemu-option.h"
33
#include "qemu-config.h"
34

    
35
/* feature flags taken from "Intel Processor Identification and the CPUID
36
 * Instruction" and AMD's "CPUID Specification".  In cases of disagreement
37
 * between feature naming conventions, aliases may be added.
38
 */
39
/* Flag names for CPUID[1].EDX, indexed by bit position (0..31).
 * NULL marks a reserved/unnamed bit; lookup_feature() skips those. */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse",
    "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep",
    "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
    NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss",
    "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
49
/* Flag names for CPUID[1].ECX, indexed by bit position.  Entries with a
 * '|' list alternative spellings accepted on the command line (altcmp()). */
static const char *ext_feature_name[] = {
    "pni|sse3" /* Intel,AMD sse3 */, NULL, NULL, "monitor",
    "ds_cpl", "vmx", NULL /* Linux smx */, "est",
    "tm2", "ssse3", "cid", NULL,
    NULL, "cx16", "xtpr", NULL,
    NULL, NULL, "dca", "sse4.1|sse4_1",
    "sse4.2|sse4_2", "x2apic", NULL, "popcnt",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, "hypervisor",
};
59
/* Flag names for CPUID[0x80000001].EDX, indexed by bit position.  Many of
 * the low bits mirror CPUID[1].EDX on AMD parts, hence the duplicate names. */
static const char *ext2_feature_name[] = {
    "fpu", "vme", "de", "pse",
    "tsc", "msr", "pae", "mce",
    "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall",
    "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", NULL, NULL /* Linux mp */,
    "nx" /* Intel xd */, NULL, "mmxext", "mmx",
    "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp",
    NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
};
69
/* Flag names for CPUID[0x80000001].ECX, indexed by bit position. */
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
    "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL,
    "skinit", "wdt", NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
79

    
80
/* Flag names for the KVM paravirtual feature word, indexed by bit position
 * (presumably CPUID 0x40000001 EAX -- confirm against Linux kvm_para.h). */
static const char *kvm_feature_name[] = {
    "kvmclock", "kvm_nopiodelay", "kvm_mmu", NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
86

    
87
/* collects per-function cpuid data
88
 */
89
/* collects per-function cpuid data
 */
typedef struct model_features_t {
    uint32_t *guest_feat;     /* feature word requested for the guest */
    uint32_t *host_feat;      /* matching feature word reported by the host */
    uint32_t check_feat;      /* mask of bits worth warning about */
    const char **flag_names;  /* bit-indexed names (feature_name et al.) */
    uint32_t cpuid;           /* cpuid function the words came from */
    } model_features_t;
96

    
97
/* set by "-cpu ...,check": warn when requested flags are missing on the host */
int check_cpuid = 0;
/* set by "-cpu ...,enforce": additionally make a failed check fatal */
int enforce_cpuid = 0;
99

    
100
static void host_cpuid(uint32_t function, uint32_t count, uint32_t *eax,
101
                       uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
102

    
103
/* true for any non-NUL char outside printable ASCII [' '..'~']; used as the
 * "whitespace" test when tokenizing config-file feature strings */
#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
104

    
105
/* Lexicographically compare the substrings *[s1..e1) and *[s2..e2).
 * Each sN points at the start of a substring; the matching eN, when
 * non-NULL, points one past its end.  A NULL eN means the substring
 * runs to the terminating NUL.  Returns <0, 0 or >0 like strcmp().
 */
static int sstrcmp(const char *s1, const char *e1, const char *s2,
    const char *e2)
{
    while (*s1 && *s2 && *s1 == *s2) {
        ++s1;
        ++s2;
        if (s1 == e1) {
            /* first substring exhausted: equal only if second is too */
            return s2 == e2 ? 0 : *s2;
        }
        if (s2 == e2) {
            /* second exhausted first: leftover char of s1 decides */
            return *s1;
        }
    }
    return *s1 - *s2;
}
125

    
126
/* Compare *[s..e) against *altstr, which is either a simple string or a
 * list of '|'-delimited (possibly empty) alternatives searched left to
 * right.  Returns 0 on a match, non-zero otherwise.
 */
static int altcmp(const char *s, const char *e, const char *altstr)
{
    const char *start = altstr;

    for (;;) {
        const char *end = start;

        /* scan to the end of the current alternative */
        while (*end && *end != '|') {
            ++end;
        }
        /* an empty alternative matches only the empty string */
        if (start == end ? !*s : !sstrcmp(s, e, start, end)) {
            return 0;
        }
        if (!*end) {
            return 1;   /* out of alternatives: no match */
        }
        start = end + 1;
    }
}
146

    
147
/* search featureset for flag *[s..e); when found, set the corresponding
 * bit in *pval and return 1 (success), otherwise return 0
 */
static int lookup_feature(uint32_t *pval, const char *s, const char *e,
    const char **featureset)
{
    int bit;

    for (bit = 0; bit < 32; ++bit) {
        const char *flag = featureset[bit];

        if (flag && !altcmp(s, e, flag)) {
            *pval |= 1u << bit;
            return 1;
        }
    }
    return 0;
}
163

    
164
static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
165
                                    uint32_t *ext_features,
166
                                    uint32_t *ext2_features,
167
                                    uint32_t *ext3_features,
168
                                    uint32_t *kvm_features)
169
{
170
    if (!lookup_feature(features, flagname, NULL, feature_name) &&
171
        !lookup_feature(ext_features, flagname, NULL, ext_feature_name) &&
172
        !lookup_feature(ext2_features, flagname, NULL, ext2_feature_name) &&
173
        !lookup_feature(ext3_features, flagname, NULL, ext3_feature_name) &&
174
        !lookup_feature(kvm_features, flagname, NULL, kvm_feature_name))
175
            fprintf(stderr, "CPU feature %s not found\n", flagname);
176
}
177

    
178
/* A CPU model definition: CPUID identification fields plus the feature
 * words exposed to the guest.
 */
typedef struct x86_def_t {
    struct x86_def_t *next;             /* singly-linked list head: x86_defs */
    const char *name;                   /* model name selected by "-cpu" */
    uint32_t level;                     /* max basic CPUID leaf (CPUID[0].EAX) */
    uint32_t vendor1, vendor2, vendor3; /* vendor string words, EBX/EDX/ECX order */
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features, kvm_features;
    uint32_t xlevel;                    /* max extended CPUID leaf */
    char model_id[48];                  /* brand string, leaves 0x80000002..4 */
    int vendor_override;                /* nonzero when "vendor=" was given */
    uint32_t flags;
} x86_def_t;
192

    
193
/* Cumulative CPUID[1].EDX feature sets used by the built-in models below;
 * each generation extends the previous one. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
/* baseline feature set for the qemu32/qemu64/athlon/n270 models */
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
204

    
205
/* maintains list of cpu model definitions
 */
static x86_def_t *x86_defs = {NULL}; /* list head; built-ins and config-file models */
208

    
209
/* built-in cpu model definitions (deprecated)
210
 */
211
static x86_def_t builtin_x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 4,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
            CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "phenom",
        .level = 5,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 16,
        .model = 2,
        .stepping = 3,
        /* Missing: CPUID_VME, CPUID_HT */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
            CPUID_EXT_POPCNT,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR,
        /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
            CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
    },
    {
        .name = "core2duo",
        .level = 10,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* The original CPU also implements these features:
               CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* The original CPU also implements these ext features:
               CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
               CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .ext3_features = CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU     T7700  @ 2.40GHz",
    },
    {
        /* a conservative model meant to be safe for live migration */
        .name = "kvm64",
        .level = 5,
        .vendor1 = CPUID_VENDOR_INTEL_1,
        .vendor2 = CPUID_VENDOR_INTEL_2,
        .vendor3 = CPUID_VENDOR_INTEL_3,
        .family = 15,
        .model = 6,
        .stepping = 1,
        /* Missing: CPUID_VME, CPUID_HT */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
        .ext3_features = 0,
        .xlevel = 0x80000008,
        .model_id = "Common KVM processor"
    },
#endif
    {
        .name = "qemu32",
        .level = 4,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
        .xlevel = 0,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "coreduo",
        .level = 10,
        .family = 6,
        .model = 14,
        .stepping = 8,
        /* The original CPU also implements these features:
               CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
        /* The original CPU also implements these ext features:
               CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
               CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .ext2_features = CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU           T2600  @ 2.16GHz",
    },
    {
        /* legacy models below carry no brand string (model_id all-zero) */
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        /* XXX: put another string ? */
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "n270",
        /* original is on level 10 */
        .level = 5,
        .family = 6,
        .model = 28,
        .stepping = 2,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
            /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
             * CPUID_HT | CPUID_TM | CPUID_PBE */
            /* Some CPUs got no CPUID_SEP */
        .ext_features = CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
            /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
             * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270   @ 1.60GHz",
    },
};
415

    
416
/* Read the host's 48-byte brand string (CPUID leaves 0x80000002..0x80000004,
 * sixteen bytes per leaf in EAX..EDX order) into str.  Caller must supply a
 * buffer of at least 48 bytes; no extra NUL is appended beyond those 48.
 * Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4] = { 0, 0, 0, 0 };
    int leaf;

    for (leaf = 0; leaf < 3; leaf++) {
        char *out = str + leaf * 16;

        host_cpuid(0x80000002 + leaf, 0, &regs[0], &regs[1], &regs[2], &regs[3]);
        /* regs[] is contiguous, so one copy covers EAX..EDX in order */
        memcpy(out, regs, sizeof(regs));
    }
    return 0;
}
430

    
431
/* Populate *x86_cpu_def from the host CPU's own CPUID leaves; used to
 * implement "-cpu host".  Always returns 0.
 */
static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    x86_cpu_def->name = "host";
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->level = eax;
    /* leaf 0 returns the vendor string in EBX:EDX:ECX order */
    x86_cpu_def->vendor1 = ebx;
    x86_cpu_def->vendor2 = edx;
    x86_cpu_def->vendor3 = ecx;

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    /* NOTE(review): extended family/model fields are folded in
     * unconditionally; the SDM defines them only for family 0xF (and
     * model for family 6) -- confirm this simplification is intended */
    x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    x86_cpu_def->stepping = eax & 0x0F;
    x86_cpu_def->ext_features = ecx;
    x86_cpu_def->features = edx;

    host_cpuid(0x80000000, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->xlevel = eax;

    host_cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->ext2_features = edx;
    x86_cpu_def->ext3_features = ecx;
    cpu_x86_fill_model_id(x86_cpu_def->model_id);
    x86_cpu_def->vendor_override = 0;

    return 0;
}
460

    
461
static int unavailable_host_feature(struct model_features_t *f, uint32_t mask)
462
{
463
    int i;
464

    
465
    for (i = 0; i < 32; ++i)
466
        if (1 << i & mask) {
467
            fprintf(stderr, "warning: host cpuid %04x_%04x lacks requested"
468
                " flag '%s' [0x%08x]\n",
469
                f->cpuid >> 16, f->cpuid & 0xffff,
470
                f->flag_names[i] ? f->flag_names[i] : "[reserved]", mask);
471
            break;
472
        }
473
    return 0;
474
}
475

    
476
/* best effort attempt to inform user requested cpu flags aren't making
477
 * their way to the guest.  Note: ft[].check_feat ideally should be
478
 * specified via a guest_def field to suppress report of extraneous flags.
479
 */
480
static int check_features_against_host(x86_def_t *guest_def)
{
    x86_def_t host_def;
    uint32_t mask;
    int rv, i;
    /* one row per cpuid feature word: guest word, host word, mask of bits
     * worth reporting, bit-indexed names, and the cpuid function used in
     * the warning message */
    struct model_features_t ft[] = {
        {&guest_def->features, &host_def.features,
            ~0, feature_name, 0x00000000},
        {&guest_def->ext_features, &host_def.ext_features,
            ~CPUID_EXT_HYPERVISOR, ext_feature_name, 0x00000001},
        {&guest_def->ext2_features, &host_def.ext2_features,
            ~PPRO_FEATURES, ext2_feature_name, 0x80000000},
        {&guest_def->ext3_features, &host_def.ext3_features,
            ~CPUID_EXT3_SVM, ext3_feature_name, 0x80000001}};

    cpu_x86_fill_host(&host_def);
    /* warn once for every checked flag the guest requests but the host
     * does not provide; rv records whether any such flag was found */
    for (rv = 0, i = 0; i < sizeof (ft) / sizeof (ft[0]); ++i)
        for (mask = 1; mask; mask <<= 1)
            if (ft[i].check_feat & mask && *ft[i].guest_feat & mask &&
                !(*ft[i].host_feat & mask)) {
                    unavailable_host_feature(&ft[i], mask);
                    rv = 1;
                }
    return rv;
}
505

    
506
/* Resolve a "-cpu model[,option,...]" string into *x86_cpu_def.
 * The first comma-separated token names a model ("host" under KVM uses
 * the host's own CPUID); the rest are "+flag", "-flag", "key=value"
 * options, or the bare words "check"/"enforce".  Returns 0 on success,
 * -1 on any parse error.
 * NOTE(review): uses strtok(), so this is not reentrant.
 */
static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    char *s = strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0, plus_kvm_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0, minus_kvm_features = 0;
    uint32_t numvalue;

    for (def = x86_defs; def; def = def->next)
        if (!strcmp(name, def->name))
            break;
    if (kvm_enabled() && strcmp(name, "host") == 0) {
        cpu_x86_fill_host(x86_cpu_def);
    } else if (!def) {
        goto error;
    } else {
        memcpy(x86_cpu_def, def, sizeof(*def));
    }

    plus_kvm_features = ~0; /* not supported bits will be filtered out later */

    /* the hypervisor bit is always advertised to the guest */
    add_flagname_to_bitmaps("hypervisor", &plus_features,
        &plus_ext_features, &plus_ext2_features, &plus_ext3_features,
        &plus_kvm_features);

    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features, &plus_kvm_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features, &minus_kvm_features);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0; val++;    /* split "key=value" in place */
            if (!strcmp(featurestr, "family")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = numvalue;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err || numvalue > 0xff) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = numvalue;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err || numvalue > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = numvalue ;
            } else if (!strcmp(featurestr, "level")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->level = numvalue;
            } else if (!strcmp(featurestr, "xlevel")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                /* allow "xlevel=8" style shorthand for extended leaves */
                if (numvalue < 0x80000000) {
                        numvalue += 0x80000000;
                }
                x86_cpu_def->xlevel = numvalue;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
                /* pack 12 chars into three little-endian register words */
                for(i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i    ]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
                x86_cpu_def->vendor_override = 1;
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else if (!strcmp(featurestr, "check")) {
            check_cpuid = 1;
        } else if (!strcmp(featurestr, "enforce")) {
            check_cpuid = enforce_cpuid = 1;
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    /* apply "+flag" additions first, then "-flag" removals win */
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->kvm_features |= plus_kvm_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    x86_cpu_def->kvm_features &= ~minus_kvm_features;
    if (check_cpuid) {
        if (check_features_against_host(x86_cpu_def) && enforce_cpuid)
            goto error;
    }
    free(s);
    return 0;

error:
    free(s);
    return -1;
}
639

    
640
/* generate a composite string into buf of all cpuid names in featureset
641
 * selected by fbits.  indicate truncation at bufsize in the event of overflow.
642
 * if flags, suppress names undefined in featureset.
643
 */
644
static void listflags(char *buf, int bufsize, uint32_t fbits,
    const char **featureset, uint32_t flags)
{
    const char **p = &featureset[31];
    char *q, *b;
    int nc, bit;    /* bit widened from char: it is printed with %d */

    /* reserve room for a "..." truncation marker when the buffer can hold
     * one; b points at where the marker would be written */
    b = 4 <= bufsize ? buf + (bufsize -= 3) - 1 : NULL;
    *buf = '\0';
    /* walk bits 31..0; each visited bit is cleared so the loop stops as
     * soon as no requested bits remain.  1u: shifting a signed 1 into
     * bit 31 was undefined behavior. */
    for (q = buf, bit = 31; fbits && bufsize; --p, fbits &= ~(1u << bit), --bit)
        if (fbits & (1u << bit) && (*p || !flags)) {
            if (*p)
                nc = snprintf(q, bufsize, "%s%s", q == buf ? "" : " ", *p);
            else
                nc = snprintf(q, bufsize, "%s[%d]", q == buf ? "" : " ", bit);
            if (bufsize <= nc) {
                /* overflow: mark truncation in the reserved tail and stop */
                if (b)
                    sprintf(b, "...");
                return;
            }
            q += nc;
            bufsize -= nc;
        }
}
668

    
669
/* generate CPU information:
670
 * -?        list model names
671
 * -?model   list model names/IDs
672
 * -?dump    output all model (x86_def_t) data
673
 * -?cpuid   list all recognized cpuid flag names
674
 */ 
675
void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                  const char *optarg)
{
    unsigned char model = !strcmp("?model", optarg);
    unsigned char dump = !strcmp("?dump", optarg);
    unsigned char cpuid = !strcmp("?cpuid", optarg);
    x86_def_t *def;
    char buf[256];

    if (cpuid) {
        /* "?cpuid": dump every flag name each feature bitmap recognizes */
        (*cpu_fprintf)(f, "Recognized CPUID flags:\n");
        listflags(buf, sizeof (buf), (uint32_t)~0, feature_name, 1);
        (*cpu_fprintf)(f, "  f_edx: %s\n", buf);
        listflags(buf, sizeof (buf), (uint32_t)~0, ext_feature_name, 1);
        (*cpu_fprintf)(f, "  f_ecx: %s\n", buf);
        listflags(buf, sizeof (buf), (uint32_t)~0, ext2_feature_name, 1);
        (*cpu_fprintf)(f, "  extf_edx: %s\n", buf);
        listflags(buf, sizeof (buf), (uint32_t)~0, ext3_feature_name, 1);
        (*cpu_fprintf)(f, "  extf_ecx: %s\n", buf);
        return;
    }
    for (def = x86_defs; def; def = def->next) {
        /* models with def->flags set (presumably config-file defined --
         * confirm where flags is assigned) are shown in brackets */
        snprintf(buf, sizeof (buf), def->flags ? "[%s]": "%s", def->name);
        if (model || dump) {
            (*cpu_fprintf)(f, "x86 %16s  %-48s\n", buf, def->model_id);
        } else {
            (*cpu_fprintf)(f, "x86 %16s\n", buf);
        }
        if (dump) {
            /* reassemble the 12-char vendor string from its three words */
            memcpy(buf, &def->vendor1, sizeof (def->vendor1));
            memcpy(buf + 4, &def->vendor2, sizeof (def->vendor2));
            memcpy(buf + 8, &def->vendor3, sizeof (def->vendor3));
            buf[12] = '\0';
            (*cpu_fprintf)(f,
                "  family %d model %d stepping %d level %d xlevel 0x%x"
                " vendor \"%s\"\n",
                def->family, def->model, def->stepping, def->level,
                def->xlevel, buf);
            listflags(buf, sizeof (buf), def->features, feature_name, 0);
            (*cpu_fprintf)(f, "  feature_edx %08x (%s)\n", def->features,
                buf);
            listflags(buf, sizeof (buf), def->ext_features, ext_feature_name,
                0);
            (*cpu_fprintf)(f, "  feature_ecx %08x (%s)\n", def->ext_features,
                buf);
            listflags(buf, sizeof (buf), def->ext2_features, ext2_feature_name,
                0);
            (*cpu_fprintf)(f, "  extfeature_edx %08x (%s)\n",
                def->ext2_features, buf);
            listflags(buf, sizeof (buf), def->ext3_features, ext3_feature_name,
                0);
            (*cpu_fprintf)(f, "  extfeature_ecx %08x (%s)\n",
                def->ext3_features, buf);
            (*cpu_fprintf)(f, "\n");
        }
    }
}
732

    
733
/* Initialize env's CPUID state from the model/options in cpu_model.
 * Returns 0 on success, -1 when the model or its options are invalid.
 */
static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        /* models with no explicit vendor default to Intel */
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_vendor_override = def->vendor_override;
    env->cpuid_level = def->level;
    /* families >= 0x10 are encoded via the extended-family field */
    if (def->family > 0x0f)
        env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
    else
        env->cpuid_version = def->family << 8;
    env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
    env->cpuid_version |= def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    /* BUG FIX: ext3 (CPUID 0x80000001 ECX) flags were parsed, merged and
     * host-checked by cpu_x86_find_by_name() but never copied into env,
     * silently dropping e.g. lahf_lm/svm/sse4a from the guest */
    env->cpuid_ext3_features = def->ext3_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_kvm_features = def->kvm_features;
    {
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        /* pack the 48-char brand string into 12 little-endian words,
         * NUL-padding past the end of model_id */
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
    return 0;
}
778

    
779
#if !defined(CONFIG_LINUX_USER)
780
/* Copy (up to) the first four characters of vendor-id string s into the
 * 32-bit register word *id, NUL-padding when s is shorter.
 */
static void cpyid(const char *s, uint32_t *id)
{
    char *dst = (char *)id;
    size_t n;

    for (n = 0; n < sizeof(*id); ++n) {
        dst[n] = *s ? *s++ : '\0';
    }
}
790

    
791
/* interpret radix and convert from string to arbitrary scalar,
 * otherwise flag failure
 *
 * (kept as a brace-block macro, NOT do/while(0): call sites invoke it
 * without a trailing semicolon)
 */
#define setscalar(pval, str, perr)                      \
{                                                       \
    char *pend;                                         \
    unsigned long ul;                                   \
                                                        \
    ul = strtoul(str, &pend, 0);                        \
    if (*str && !*pend)                                 \
        *pval = ul;                                     \
    else                                                \
        *perr = 1;                                      \
}
802

    
803
/* map cpuid options to feature bits, otherwise return failure
 * (option tags in *str are delimited by whitespace)
 */
static void setfeatures(uint32_t *pval, const char *str,
    const char **featureset, int *perr)
{
    const char *tok, *tend;

    tok = str;
    for (;;) {
        /* find the start of the next token */
        while (iswhite(*tok))
            ++tok;
        if (!*tok)
            return;
        /* find its end */
        tend = tok;
        while (*tend && !iswhite(*tend))
            ++tend;
        if (!lookup_feature(pval, tok, tend, featureset)) {
            fprintf(stderr, "error: feature \"%.*s\" not available in set\n",
                (int)(tend - tok), tok);
            *perr = 1;
            return;
        }
        tok = tend;
    }
}
826

    
827
/* map config file options to x86_def_t form
828
 */
829
static int cpudef_setfield(const char *name, const char *str, void *opaque)
830
{
831
    x86_def_t *def = opaque;
832
    int err = 0;
833

    
834
    if (!strcmp(name, "name")) {
835
        def->name = strdup(str);
836
    } else if (!strcmp(name, "model_id")) {
837
        strncpy(def->model_id, str, sizeof (def->model_id));
838
    } else if (!strcmp(name, "level")) {
839
        setscalar(&def->level, str, &err)
840
    } else if (!strcmp(name, "vendor")) {
841
        cpyid(&str[0], &def->vendor1);
842
        cpyid(&str[4], &def->vendor2);
843
        cpyid(&str[8], &def->vendor3);
844
    } else if (!strcmp(name, "family")) {
845
        setscalar(&def->family, str, &err)
846
    } else if (!strcmp(name, "model")) {
847
        setscalar(&def->model, str, &err)
848
    } else if (!strcmp(name, "stepping")) {
849
        setscalar(&def->stepping, str, &err)
850
    } else if (!strcmp(name, "feature_edx")) {
851
        setfeatures(&def->features, str, feature_name, &err);
852
    } else if (!strcmp(name, "feature_ecx")) {
853
        setfeatures(&def->ext_features, str, ext_feature_name, &err);
854
    } else if (!strcmp(name, "extfeature_edx")) {
855
        setfeatures(&def->ext2_features, str, ext2_feature_name, &err);
856
    } else if (!strcmp(name, "extfeature_ecx")) {
857
        setfeatures(&def->ext3_features, str, ext3_feature_name, &err);
858
    } else if (!strcmp(name, "xlevel")) {
859
        setscalar(&def->xlevel, str, &err)
860
    } else {
861
        fprintf(stderr, "error: unknown option [%s = %s]\n", name, str);
862
        return (1);
863
    }
864
    if (err) {
865
        fprintf(stderr, "error: bad option value [%s = %s]\n", name, str);
866
        return (1);
867
    }
868
    return (0);
869
}
870

    
871
/* register config file entry as x86_def_t
872
 */
873
static int cpudef_register(QemuOpts *opts, void *opaque)
874
{
875
    x86_def_t *def = qemu_mallocz(sizeof (x86_def_t));
876

    
877
    qemu_opt_foreach(opts, cpudef_setfield, def, 1);
878
    def->next = x86_defs;
879
    x86_defs = def;
880
    return (0);
881
}
882
#endif        /* !CONFIG_LINUX_USER */
883

    
884
/* register "cpudef" models defined in configuration file.  Here we first
885
 * preload any built-in definitions
886
 */
887
void x86_cpudef_setup(void)
888
{
889
    int i;
890

    
891
    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
892
        builtin_x86_defs[i].next = x86_defs;
893
        builtin_x86_defs[i].flags = 1;
894
        x86_defs = &builtin_x86_defs[i];
895
    }
896
#if !defined(CONFIG_LINUX_USER)
897
    qemu_opts_foreach(&qemu_cpudef_opts, cpudef_register, NULL, 0);
898
#endif
899
}
900

    
901
/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    /* wipe all state up to (but excluding) the breakpoint lists, which
       survive the reset and are cleaned selectively below */
    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    /* architectural CR0 reset value (CD | NW | ET | bit 4) */
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* real-mode segments; CS base 0xffff0000 + EIP 0xfff0 below puts
       execution at the reset vector */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    /* EDX holds the CPU signature after reset */
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init: all stack slots tagged empty */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);

    env->mcg_status = 0;
}
974

    
975
/* Free a CPU state previously allocated by cpu_x86_init. */
void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}
979

    
980
/***********************************************************/
/* x86 debug */
982

    
983
/* printable names for the lazy condition-code states; the entry order
   must stay in sync with the CC_OP_* enumeration (indexed by env->cc_op
   in cpu_dump_state) */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
1037

    
1038
static void
1039
cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
1040
                       int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
1041
                       const char *name, struct SegmentCache *sc)
1042
{
1043
#ifdef TARGET_X86_64
1044
    if (env->hflags & HF_CS64_MASK) {
1045
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
1046
                    sc->selector, sc->base, sc->limit, sc->flags);
1047
    } else
1048
#endif
1049
    {
1050
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
1051
                    (uint32_t)sc->base, sc->limit, sc->flags);
1052
    }
1053

    
1054
    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
1055
        goto done;
1056

    
1057
    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
1058
    if (sc->flags & DESC_S_MASK) {
1059
        if (sc->flags & DESC_CS_MASK) {
1060
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
1061
                           ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
1062
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
1063
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
1064
        } else {
1065
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
1066
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
1067
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
1068
        }
1069
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
1070
    } else {
1071
        static const char *sys_type_name[2][16] = {
1072
            { /* 32 bit mode */
1073
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
1074
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
1075
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
1076
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
1077
            },
1078
            { /* 64 bit mode */
1079
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
1080
                "Reserved", "Reserved", "Reserved", "Reserved",
1081
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
1082
                "Reserved", "IntGate64", "TrapGate64"
1083
            }
1084
        };
1085
        cpu_fprintf(f, sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
1086
                                    [(sc->flags & DESC_TYPE_MASK)
1087
                                     >> DESC_TYPE_SHIFT]);
1088
    }
1089
done:
1090
    cpu_fprintf(f, "\n");
1091
}
1092

    
1093
/* Dump the full CPU state (general registers, segments, control and
 * debug registers) to f via cpu_fprintf.
 * flags: X86_DUMP_CCOP additionally dumps the lazy condition-code
 * state, X86_DUMP_FPU the FPU/SSE registers. */
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    cpu_synchronize_state(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        /* fix: env->dr is target_ulong, which is 64 bits on x86_64
           targets — passing it to %08x without a cast is a
           format/argument type mismatch (undefined behavior) */
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%08x ", i, (uint32_t)env->dr[i]);
        cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n",
                    (uint32_t)env->dr[6], (uint32_t)env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        /* FTW byte: bit i set means FP register i is valid */
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}
1277

    
1278
/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */
1281

    
1282
/* Set the A20 gate state; a no-op when the state does not change. */
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state == ((env->a20_mask >> 20) & 1)) {
        return;
    }
#if defined(DEBUG_MMU)
    printf("A20 update: a20=%d\n", a20_state);
#endif
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

    /* when a20 is changed, all the MMU mappings are invalid, so
       we must flush everything */
    tlb_flush(env, 1);
    env->a20_mask = ~(1 << 20) | (a20_state << 20);
}
1299

    
1300
/* Update CR0 and recompute the state derived from it: TLB contents,
   long-mode entry/exit, and the PE/ADDSEG/MP/EM/TS hidden flags. */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* any change to a paging-related bit invalidates all mappings */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    /* ET is architecturally hardwired to 1 */
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags: CR0.MP/EM/TS are copied into the corresponding
       hflags bits with a single shift */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
1340

    
1341
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
1342
   the PDPT */
1343
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
1344
{
1345
    env->cr[3] = new_cr3;
1346
    if (env->cr[0] & CR0_PG_MASK) {
1347
#if defined(DEBUG_MMU)
1348
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
1349
#endif
1350
        tlb_flush(env, 0);
1351
    }
1352
}
1353

    
1354
/* Update CR4, flushing the TLB when a paging-related bit changes and
 * keeping the OSFXSR hidden flag in sync. */
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    /* fix: log the incoming value, not the stale env->cr[4] */
    printf("CR4 update: CR4=%08x\n", new_cr4);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling: OSFXSR cannot be set if the CPU lacks SSE */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}
1373

    
1374
#if defined(CONFIG_USER_ONLY)
1375

    
1376
/* User-mode emulation has no MMU: every fault is reported as a
 * user-level page fault on addr.  Always returns 1 (raise #PF). */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* user mode only emulation */
    env->cr[2] = addr;
    env->error_code = ((is_write & 1) << PG_ERROR_W_BIT) | PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
1387

    
1388
/* In user-mode emulation guest virtual addresses map 1:1 to "physical"
   addresses, so the translation is the identity. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
1392

    
1393
#else
1394

    
1395
/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
/* bits 12..39: 40-bit physical address space */
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
/* bits 12..35: 36-bit (PAE) physical address space */
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
1402

    
1403
/* Walk the page tables for addr and install the translation in the TLB,
   updating accessed/dirty bits along the way.

   is_write1 encodes the access type: 0 = read, 1 = write, 2 = code
   fetch (bit 0 is the write bit; value 2 additionally drives the NX and
   PG_ERROR_I_D_MASK handling below).

   return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    /* paging disabled: identity mapping, full access */
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            /* 4-level walk: PML4E -> PDPE -> PDE -> PTE.  ptep
               accumulates the combined USER/RW/NX permissions; NX is
               kept inverted (via ^ PG_NX_MASK) so that AND-combining
               works for all three bits. */
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                /* non-canonical address: #GP, not #PF */
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* NX set while EFER.NXE is off is a reserved-bit fault */
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* legacy PAE: 3-level walk starting at the 4-entry PDPT */
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* PDPT entries carry no permission bits: start permissive */
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            /* un-invert NX now that combining is done */
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        /* legacy 2-level 32-bit walk (no PAE) */
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    /* build the #PF error code from the access type */
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
1698

    
1699
/* Translate a guest virtual address to a guest physical address for
 * debugger use, walking the page tables without touching the TLB and
 * without setting accessed/dirty bits.  Returns -1 if the address is
 * not mapped (non-present entry or non-canonical long-mode address).
 */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        /* PAE (or long-mode) paging: 64-bit entries. */
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* Reject non-canonical addresses: bits 63..47 must all be
             * copies of bit 47.
             */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                return -1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) +
                          (((addr >> 39) & 0x1ff) << 3)) & env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                return -1;
            }

            pdpe_addr = ((pml4e & ~0xfff) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
        } else
#endif
        {
            /* 32-bit PAE: CR3 points at the 4-entry PDPT. */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page: the PDE itself is the leaf entry. */
            page_size = 2048 * 1024;
            pte = pde & ~((page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page: descend one more level. */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK)) {
            return -1;
        }
    } else {
        /* Legacy 32-bit paging (or paging disabled). */
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            /* No paging: identity mapping. */
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
                env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK)) {
                return -1;
            }
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                /* 4 MB page. */
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                    env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK)) {
                    return -1;
                }
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
1792

    
1793
/* Mirror DR7/DRx hardware debug register state into QEMU's generic
 * breakpoint/watchpoint machinery for the given debug register index.
 * On insertion failure the cached breakpoint pointer is cleared.
 */
void hw_breakpoint_insert(CPUState *env, int index)
{
    int type;
    int err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        /* Instruction breakpoint. */
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        }
        break;
    case 1:
        /* Data write breakpoint. */
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 3:
        /* Data read/write breakpoint. */
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
    if (err) {
        env->cpu_breakpoint[index] = NULL;
    }
}
1820

    
1821
/* Undo hw_breakpoint_insert() for the given debug register index.
 * A NULL cached pointer means nothing was inserted, so there is
 * nothing to remove.
 */
void hw_breakpoint_remove(CPUState *env, int index)
{
    if (!env->cpu_breakpoint[index]) {
        return;
    }
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        /* Instruction breakpoint. */
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        }
        break;
    case 1:
    case 3:
        /* Data breakpoints are backed by watchpoints. */
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
}
1839

    
1840
/* Scan the four hardware debug registers for a hit at the current EIP
 * (instruction breakpoints) or a triggered watchpoint, accumulating the
 * B0..B3 bits of DR6.  DR6 is written back when an enabled breakpoint
 * hit, or unconditionally when force_dr6_update is set.  Returns nonzero
 * iff an *enabled* breakpoint was hit.
 */
int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong dr6 = env->dr[6] & ~0xf;
    int hit_enabled = 0;
    int reg;

    for (reg = 0; reg < 4; reg++) {
        int type = hw_breakpoint_type(env->dr[7], reg);
        int hit = 0;

        if (type == 0) {
            /* Instruction breakpoint: match against EIP. */
            hit = (env->dr[reg] == env->eip);
        } else if (type & 1) {
            /* Data breakpoint (types 1 and 3): watchpoint must have fired. */
            hit = env->cpu_watchpoint[reg] &&
                  (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT);
        }
        if (hit) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg)) {
                hit_enabled = 1;
            }
        }
    }
    if (hit_enabled || force_dr6_update) {
        env->dr[6] = dr6;
    }
    return hit_enabled;
}
1861

    
1862
static CPUDebugExcpHandler *prev_debug_excp_handler;
1863

    
1864
void raise_exception(int exception_index);
1865

    
1866
/* Debug exception hook: decide whether a stopped watchpoint/breakpoint
 * belongs to the guest's hardware debug registers (BP_CPU) and, if so,
 * turn it into a guest #DB exception; otherwise resume.  Chains to the
 * previously installed handler, if any.
 */
static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0)) {
                raise_exception(EXCP01_DB);
            } else {
                /* Not an enabled guest breakpoint: continue execution. */
                cpu_resume_from_signal(env, NULL);
            }
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    /* Guest hardware breakpoint: update DR6 and raise #DB. */
                    check_hw_breakpoints(env, 1);
                    raise_exception(EXCP01_DB);
                }
                break;
            }
        }
    }
    if (prev_debug_excp_handler) {
        prev_debug_excp_handler(env);
    }
}
1891

    
1892
/* This should come from sysemu.h - if we could include it here... */
1893
void qemu_system_reset_request(void);
1894

    
1895
/* Inject a machine-check event into the given MCE bank of the CPU.
 * Silently ignored when the bank is out of range, the status is not
 * valid, or uncorrected-error reporting is disabled globally (MCG_CTL)
 * or per-bank (MCi_CTL).  An uncorrected error while a previous MCE is
 * still in progress (or with CR4.MCE clear) triggers a system reset.
 */
void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc)
{
    uint64_t mcg_cap = cenv->mcg_cap;
    unsigned bank_num = mcg_cap & 0xff;
    uint64_t *b = cenv->mce_banks;

    if (bank >= bank_num || !(status & MCI_STATUS_VAL)) {
        return;
    }

    /*
     * if MSR_MCG_CTL is not all 1s, the uncorrected error
     * reporting is disabled
     */
    if ((status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
        cenv->mcg_ctl != ~(uint64_t)0) {
        return;
    }
    b += 4 * bank;   /* each bank has 4 MSRs: CTL, STATUS, ADDR, MISC */
    /*
     * if MSR_MCi_CTL is not all 1s, the uncorrected error
     * reporting is disabled for the bank
     */
    if ((status & MCI_STATUS_UC) && b[0] != ~(uint64_t)0) {
        return;
    }
    if (status & MCI_STATUS_UC) {
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            /* A nested MCE (or MCE with CR4.MCE=0) means triple fault. */
            fprintf(stderr, "injects mce exception while previous "
                    "one is in progress!\n");
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (b[1] & MCI_STATUS_VAL) {
            status |= MCI_STATUS_OVER;  /* previous event still pending */
        }
        b[2] = addr;
        b[3] = misc;
        cenv->mcg_status = mcg_status;
        b[1] = status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(b[1] & MCI_STATUS_VAL) || !(b[1] & MCI_STATUS_UC)) {
        /* Corrected error: record unless an uncorrected one is pending. */
        if (b[1] & MCI_STATUS_VAL) {
            status |= MCI_STATUS_OVER;
        }
        b[2] = addr;
        b[3] = misc;
        b[1] = status;
    } else {
        /* Bank busy with an uncorrected error: just flag the overflow. */
        b[1] |= MCI_STATUS_OVER;
    }
}
1945
#endif /* !CONFIG_USER_ONLY */
1946

    
1947
/* Initialize MCE emulation state: advertise the default bank count and
 * enable all banks, but only for family >= 6 CPUs whose CPUID exposes
 * both MCE and MCA.
 */
static void mce_init(CPUX86State *cenv)
{
    unsigned int bank;

    if (((cenv->cpuid_version >> 8) & 0xf) < 6 ||
        (cenv->cpuid_features & (CPUID_MCE | CPUID_MCA))
            != (CPUID_MCE | CPUID_MCA)) {
        return;
    }
    cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
    cenv->mcg_ctl = ~(uint64_t)0;
    for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
        /* Set MCi_CTL to all 1s: reporting enabled for every bank. */
        cenv->mce_banks[bank * 4] = ~(uint64_t)0;
    }
}
1960

    
1961
/* Execute the CPUID instruction on the *host* CPU and store the results
 * through any non-NULL output pointers.  Compiled to a no-op unless KVM
 * support is built in (it is only needed for host passthrough).
 */
static void host_cpuid(uint32_t function, uint32_t count,
                       uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM)
    uint32_t vec[4];
    uint32_t *out[4];
    int i;

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#else
    /* On i386, EBX may be the PIC register; save/restore everything. */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    out[0] = eax;
    out[1] = ebx;
    out[2] = ecx;
    out[3] = edx;
    for (i = 0; i < 4; i++) {
        if (out[i]) {
            *out[i] = vec[i];
        }
    }
#endif
}
1995

    
1996
/* Fill in the CPUID leaf-0 vendor string registers (EBX:EDX:ECX order)
 * from the emulated CPU model, optionally overridden by the host vendor.
 */
static void get_cpuid_vendor(CPUX86State *env, uint32_t *ebx,
                             uint32_t *ecx, uint32_t *edx)
{
    *ebx = env->cpuid_vendor1;
    *edx = env->cpuid_vendor2;
    *ecx = env->cpuid_vendor3;

    /* sysenter isn't supported on compatibility mode on AMD, syscall
     * isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual cpu vendor, but you can override
     * this if you want to use KVM's sysenter/syscall emulation
     * in compatibility mode and when doing cross vendor migration
     */
    if (kvm_enabled() && env->cpuid_vendor_override) {
        host_cpuid(0, 0, NULL, ebx, ecx, edx);
    }
}
2013

    
2014
/* Compute the emulated CPUID result for leaf 'index' / subleaf 'count'.
 * Out-of-range leaves are clamped to the basic maximum level, and
 * unknown leaves return all zeroes.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    /* Clamp requests beyond the advertised maximum (basic or extended)
     * to the highest basic leaf, as real CPUs do.
     */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel) {
            index = env->cpuid_level;
        }
    } else {
        if (index > env->cpuid_level) {
            index = env->cpuid_level;
        }
    }

    switch (index) {
    case 0:
        /* Maximum basic leaf + vendor string. */
        *eax = env->cpuid_level;
        get_cpuid_vendor(env, ebx, ecx, edx);
        break;
    case 1:
        /* Version, features, APIC id / topology. */
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;
        if (env->nr_cores * env->nr_threads > 1) {
            *ebx |= (env->nr_cores * env->nr_threads) << 16;
            *edx |= 1 << 28;    /* HTT bit */
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        *eax = 1;
        *ebx = 0;
        *ecx = 0;
        *edx = 0x2c307d;
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        *eax = (env->nr_cores > 1) ? (env->nr_cores - 1) << 26 : 0;
        if (count == 0) {
            /* L1 dcache info */
            *eax |= 0x0000121;
            *ebx = 0x1c0003f;
            *ecx = 0x000003f;
            *edx = 0x0000001;
        } else if (count == 1) {
            /* L1 icache info */
            *eax |= 0x0000122;
            *ebx = 0x1c0003f;
            *ecx = 0x000003f;
            *edx = 0x0000001;
        } else if (count == 2) {
            /* L2 cache info */
            *eax |= 0x0000143;
            if (env->nr_threads > 1) {
                *eax |= (env->nr_threads - 1) << 14;
            }
            *ebx = 0x3c0003f;
            *ecx = 0x0000fff;
            *edx = 0x0000001;
        } else {
            /* end of info */
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        /* Maximum extended leaf + vendor string (again). */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So dont set it here for Intel to make Linux guests happy.
         */
        if (env->nr_cores * env->nr_threads > 1) {
            uint32_t vendor_ebx, vendor_ecx, vendor_edx;

            get_cpuid_vendor(env, &vendor_ebx, &vendor_ecx, &vendor_edx);
            if (vendor_ebx != CPUID_VENDOR_INTEL_1 ||
                vendor_edx != CPUID_VENDOR_INTEL_2 ||
                vendor_ecx != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }

        if (kvm_enabled()) {
            /* Nested SVM not yet supported in upstream QEMU */
            *ecx &= ~CPUID_EXT3_SVM;
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf. */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        *eax = 0x01ff01ff;
        *ebx = 0x01ff01ff;
        *ecx = 0x40020140;
        *edx = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        *eax = 0;
        *ebx = 0x42004200;
        *ecx = 0x02008140;
        *edx = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
/* XXX: This value must match the one used in the MMU code. */
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
/* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028;        /* 48 bits virtual, 40 bits physical */
        } else if (env->cpuid_features & CPUID_PSE36) {
            *eax = 0x00000024; /* 36 bits physical */
        } else {
            *eax = 0x00000020; /* 32 bits physical */
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (env->nr_cores * env->nr_threads > 1) {
            *ecx |= (env->nr_cores * env->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        *eax = 0x00000001; /* SVM Revision */
        *ebx = 0x00000010; /* nr of ASIDs */
        *ecx = 0;
        *edx = 0; /* optional features */
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
2202

    
2203

    
2204
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
2205
                            target_ulong *base, unsigned int *limit,
2206
                            unsigned int *flags)
2207
{
2208
    SegmentCache *dt;
2209
    target_ulong ptr;
2210
    uint32_t e1, e2;
2211
    int index;
2212

    
2213
    if (selector & 0x4)
2214
        dt = &env->ldt;
2215
    else
2216
        dt = &env->gdt;
2217
    index = selector & ~7;
2218
    ptr = dt->base + index;
2219
    if ((index + 7) > dt->limit
2220
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
2221
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
2222
        return 0;
2223

    
2224
    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
2225
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
2226
    if (e2 & DESC_G_MASK)
2227
        *limit = (*limit << 12) | 0xfff;
2228
    *flags = e2;
2229

    
2230
    return 1;
2231
}
2232

    
2233
CPUX86State *cpu_x86_init(const char *cpu_model)
2234
{
2235
    CPUX86State *env;
2236
    static int inited;
2237

    
2238
    env = qemu_mallocz(sizeof(CPUX86State));
2239
    cpu_exec_init(env);
2240
    env->cpu_model_str = cpu_model;
2241

    
2242
    /* init various static tables */
2243
    if (!inited) {
2244
        inited = 1;
2245
        optimize_flags_init();
2246
#ifndef CONFIG_USER_ONLY
2247
        prev_debug_excp_handler =
2248
            cpu_set_debug_excp_handler(breakpoint_handler);
2249
#endif
2250
    }
2251
    if (cpu_x86_register(env, cpu_model) < 0) {
2252
        cpu_x86_close(env);
2253
        return NULL;
2254
    }
2255
    mce_init(env);
2256

    
2257
    qemu_init_vcpu(env);
2258

    
2259
    return env;
2260
}
2261

    
2262
#if !defined(CONFIG_USER_ONLY)
2263
void do_cpu_init(CPUState *env)
2264
{
2265
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
2266
    cpu_reset(env);
2267
    env->interrupt_request = sipi;
2268
    apic_init_reset(env);
2269
}
2270

    
2271
void do_cpu_sipi(CPUState *env)
2272
{
2273
    apic_sipi(env);
2274
}
2275
#else
2276
void do_cpu_init(CPUState *env)
2277
{
2278
}
2279
void do_cpu_sipi(CPUState *env)
2280
{
2281
}
2282
#endif