Statistics
| Branch: | Revision:

root / target-i386 / helper.c @ 63a54736

History | View | Annotate | Download (74.7 kB)

1
/*
2
 *  i386 helpers (without register variable usage)
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include <stdarg.h>
20
#include <stdlib.h>
21
#include <stdio.h>
22
#include <string.h>
23
#include <inttypes.h>
24
#include <signal.h>
25

    
26
#include "cpu.h"
27
#include "exec-all.h"
28
#include "qemu-common.h"
29
#include "kvm.h"
30

    
31
//#define DEBUG_MMU
32
#include "qemu-option.h"
33
#include "qemu-config.h"
34

    
35
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification".  In cases of disagreement
 * between feature naming conventions, aliases may be added.
 */
/* CPUID leaf 1 EDX flag names: index i names bit i, NULL marks a
 * reserved/unnamed bit (see cpu_x86_fill_host(): leaf 1 EDX -> features).
 */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse",
    "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep",
    "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
    NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss",
    "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
49
/* CPUID leaf 1 ECX flag names; "a|b" entries list accepted alias
 * spellings, matched by altcmp(). */
static const char *ext_feature_name[] = {
    "pni|sse3" /* Intel,AMD sse3 */, NULL, NULL, "monitor",
    "ds_cpl", "vmx", NULL /* Linux smx */, "est",
    "tm2", "ssse3", "cid", NULL,
    NULL, "cx16", "xtpr", NULL,
    NULL, NULL, "dca", "sse4.1|sse4_1",
    "sse4.2|sse4_2", "x2apic", NULL, "popcnt",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, "hypervisor",
};
59
/* CPUID leaf 0x80000001 EDX flag names (see cpu_x86_fill_host()); the
 * low bits deliberately duplicate the leaf 1 EDX names. */
static const char *ext2_feature_name[] = {
    "fpu", "vme", "de", "pse",
    "tsc", "msr", "pae", "mce",
    "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall",
    "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", NULL, NULL /* Linux mp */,
    "nx" /* Intel xd */, NULL, "mmxext", "mmx",
    "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp",
    NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
};
69
/* CPUID leaf 0x80000001 ECX flag names (see cpu_x86_fill_host()). */
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
    "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL,
    "skinit", "wdt", NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
79

    
80
/* Names for the KVM paravirtual feature word (kvm_features); only the
 * first three bits are named.  NOTE(review): the CPUID leaf this word
 * maps to is not visible in this file -- see the KVM headers. */
static const char *kvm_feature_name[] = {
    "kvmclock", "kvm_nopiodelay", "kvm_mmu", NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
86

    
87
/* collects per-function cpuid data
 *
 * One row per feature word: the guest's requested word, the host's
 * corresponding word, and the metadata needed to report mismatches
 * (consumed by check_features_against_host()).
 */
typedef struct model_features_t {
    uint32_t *guest_feat;    /* feature word requested for the guest */
    uint32_t *host_feat;     /* same word as reported by host CPUID */
    uint32_t check_feat;     /* mask of bits worth warning about */
    const char **flag_names; /* per-bit flag names (entries may be NULL) */
    uint32_t cpuid;          /* cpuid function number, for warning text */
    } model_features_t;
96

    
97
/* Set by "-cpu ...,check": warn about guest flags the host cannot provide. */
int check_cpuid = 0;
/* Set by "-cpu ...,enforce": additionally treat missing flags as an error. */
int enforce_cpuid = 0;
99

    
100
static void host_cpuid(uint32_t function, uint32_t count, uint32_t *eax,
101
                       uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
102

    
103
/* Nonzero for any non-NUL char at or below ' ' or above '~', i.e.
 * whitespace and non-printable chars; used as a token delimiter by
 * setfeatures().  (Broader than the name suggests.) */
#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
104

    
105
/* Compare the substrings *[s1..e1) and *[s2..e2).  An end pointer of
 * NULL means "run to the terminating NUL".  Returns <0, 0 or >0 giving
 * the lexical ordering of s1 relative to s2.
 */
static int sstrcmp(const char *s1, const char *e1, const char *s2,
    const char *e2)
{
    while (1) {
        /* stop on a NUL or on the first differing character */
        if (*s1 == '\0' || *s2 == '\0' || *s1 != *s2)
            return *s1 - *s2;
        s1++;
        s2++;
        int end1 = (s1 == e1);
        int end2 = (s2 == e2);
        if (end1 && end2)
            return 0;           /* both substrings consumed: equal */
        if (end1)
            return *s2;         /* s1 exhausted first: s1 is a prefix */
        if (end2)
            return *s1;         /* s2 exhausted first */
    }
}
125

    
126
/* Match *[s..e) against *altstr, a '|'-separated list of (possibly
 * empty) alternatives, scanned left to right.  An empty alternative
 * matches only an empty subject.  Returns 0 on a match, non-zero
 * otherwise.
 */
static int altcmp(const char *s, const char *e, const char *altstr)
{
    const char *alt = altstr;

    for (;;) {
        /* delimit the current alternative */
        const char *end = alt;
        while (*end != '\0' && *end != '|')
            end++;
        if (alt == end ? *s == '\0' : sstrcmp(s, e, alt, end) == 0)
            return 0;
        if (*end == '\0')
            return 1;           /* list exhausted, no match */
        alt = end + 1;          /* advance past the '|' */
    }
}
146

    
147
/* search featureset for flag *[s..e), if found set corresponding bit in
148
 * *pval and return success, otherwise return zero
149
 */
150
static int lookup_feature(uint32_t *pval, const char *s, const char *e,
151
    const char **featureset)
152
{
153
    uint32_t mask;
154
    const char **ppc;
155

    
156
    for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc)
157
        if (*ppc && !altcmp(s, e, *ppc)) {
158
            *pval |= mask;
159
            break;
160
        }
161
    return (mask ? 1 : 0);
162
}
163

    
164
static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
165
                                    uint32_t *ext_features,
166
                                    uint32_t *ext2_features,
167
                                    uint32_t *ext3_features,
168
                                    uint32_t *kvm_features)
169
{
170
    if (!lookup_feature(features, flagname, NULL, feature_name) &&
171
        !lookup_feature(ext_features, flagname, NULL, ext_feature_name) &&
172
        !lookup_feature(ext2_features, flagname, NULL, ext2_feature_name) &&
173
        !lookup_feature(ext3_features, flagname, NULL, ext3_feature_name) &&
174
        !lookup_feature(kvm_features, flagname, NULL, kvm_feature_name))
175
            fprintf(stderr, "CPU feature %s not found\n", flagname);
176
}
177

    
178
/* One CPU model definition: CPUID identification values plus the
 * feature words exposed to the guest.
 */
typedef struct x86_def_t {
    struct x86_def_t *next;     /* link in the x86_defs list */
    const char *name;           /* model name given to "-cpu name" */
    uint32_t level;             /* maximum basic CPUID function */
    uint32_t vendor1, vendor2, vendor3; /* vendor id words, EBX/EDX/ECX order */
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features, kvm_features;
    uint32_t xlevel;            /* maximum extended CPUID function */
    char model_id[48];          /* brand string (CPUID 0x80000002..4) */
    int vendor_override;        /* nonzero when vendor= was given on the command line */
    uint32_t flags;             /* nonzero defs are listed as "[name]" by x86_cpu_list() */
} x86_def_t;
192

    
193
/* Cumulative baseline feature sets used by the built-in models below. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
/* PPro-style baseline used by most modern models. */
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
204

    
205
/* maintains list of cpu model definitions: head of a singly-linked list
 * (via x86_def_t.next) searched by cpu_x86_find_by_name().
 */
static x86_def_t *x86_defs = {NULL};
208

    
209
/* built-in cpu model definitions (deprecated)
 *
 * Matched by .name in cpu_x86_find_by_name(); 64-bit-capable models are
 * compiled in only for TARGET_X86_64 builds.
 */
static x86_def_t builtin_x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 4,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
            CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "phenom",
        .level = 5,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 16,
        .model = 2,
        .stepping = 3,
        /* Missing: CPUID_VME, CPUID_HT */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
            CPUID_EXT_POPCNT,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR,
        /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
            CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
    },
    {
        .name = "core2duo",
        .level = 10,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* The original CPU also implements these features:
               CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* The original CPU also implements these ext features:
               CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
               CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .ext3_features = CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU     T7700  @ 2.40GHz",
    },
    {
        .name = "kvm64",
        .level = 5,
        .vendor1 = CPUID_VENDOR_INTEL_1,
        .vendor2 = CPUID_VENDOR_INTEL_2,
        .vendor3 = CPUID_VENDOR_INTEL_3,
        .family = 15,
        .model = 6,
        .stepping = 1,
        /* Missing: CPUID_VME, CPUID_HT */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
        .ext3_features = 0,
        .xlevel = 0x80000008,
        .model_id = "Common KVM processor"
    },
#endif
    {
        .name = "qemu32",
        .level = 4,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
        .xlevel = 0,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "coreduo",
        .level = 10,
        .family = 6,
        .model = 14,
        .stepping = 8,
        /* The original CPU also implements these features:
               CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
        /* The original CPU also implements these ext features:
               CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
               CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .ext2_features = CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU           T2600  @ 2.16GHz",
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        /* XXX: put another string ? */
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "n270",
        /* original is on level 10 */
        .level = 5,
        .family = 6,
        .model = 28,
        .stepping = 2,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
            /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
             * CPUID_HT | CPUID_TM | CPUID_PBE */
            /* Some CPUs got no CPUID_SEP */
        .ext_features = CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
            /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
             * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270   @ 1.60GHz",
    },
};
415

    
416
/* Read the host's 48-character brand string (CPUID leaves
 * 0x80000002..0x80000004) into str; str must have room for 48 bytes.
 * Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4];
    int leaf;

    for (leaf = 0; leaf < 3; leaf++) {
        regs[0] = regs[1] = regs[2] = regs[3] = 0;
        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        /* each leaf yields 16 bytes in EAX, EBX, ECX, EDX order */
        memcpy(str + leaf * 16, regs, sizeof(regs));
    }
    return 0;
}
430

    
431
/* Populate *x86_cpu_def from the host CPU's own CPUID leaves, used for
 * "-cpu host".  Always returns 0.
 */
static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    x86_cpu_def->name = "host";
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->level = eax;
    /* vendor id words in their conventional EBX/EDX/ECX order */
    x86_cpu_def->vendor1 = ebx;
    x86_cpu_def->vendor2 = edx;
    x86_cpu_def->vendor3 = ecx;

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    /* NOTE(review): extended family/model fields are folded in
     * unconditionally, not only for family 0xf/0x6 as the CPUID spec
     * describes -- confirm this is intended. */
    x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    x86_cpu_def->stepping = eax & 0x0F;
    x86_cpu_def->ext_features = ecx;
    x86_cpu_def->features = edx;

    host_cpuid(0x80000000, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->xlevel = eax;

    host_cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->ext2_features = edx;
    x86_cpu_def->ext3_features = ecx;
    cpu_x86_fill_model_id(x86_cpu_def->model_id);
    x86_cpu_def->vendor_override = 0;

    return 0;
}
460

    
461
static int unavailable_host_feature(struct model_features_t *f, uint32_t mask)
462
{
463
    int i;
464

    
465
    for (i = 0; i < 32; ++i)
466
        if (1 << i & mask) {
467
            fprintf(stderr, "warning: host cpuid %04x_%04x lacks requested"
468
                " flag '%s' [0x%08x]\n",
469
                f->cpuid >> 16, f->cpuid & 0xffff,
470
                f->flag_names[i] ? f->flag_names[i] : "[reserved]", mask);
471
            break;
472
        }
473
    return 0;
474
}
475

    
476
/* best effort attempt to inform user requested cpu flags aren't making
 * their way to the guest.  Note: ft[].check_feat ideally should be
 * specified via a guest_def field to suppress report of extraneous flags.
 *
 * Returns nonzero when at least one checked guest flag is missing on
 * the host (the "enforce" option turns that into a startup failure).
 */
static int check_features_against_host(x86_def_t *guest_def)
{
    x86_def_t host_def;
    uint32_t mask;
    int rv, i;
    /* one row per feature word: guest word, host word, bits-to-check
     * mask, per-bit names, cpuid function (for warning text) */
    struct model_features_t ft[] = {
        {&guest_def->features, &host_def.features,
            ~0, feature_name, 0x00000000},
        {&guest_def->ext_features, &host_def.ext_features,
            ~CPUID_EXT_HYPERVISOR, ext_feature_name, 0x00000001},
        {&guest_def->ext2_features, &host_def.ext2_features,
            ~PPRO_FEATURES, ext2_feature_name, 0x80000000},
        {&guest_def->ext3_features, &host_def.ext3_features,
            ~CPUID_EXT3_SVM, ext3_feature_name, 0x80000001}};

    cpu_x86_fill_host(&host_def);
    /* warn once per checked bit the guest wants but the host lacks */
    for (rv = 0, i = 0; i < sizeof (ft) / sizeof (ft[0]); ++i)
        for (mask = 1; mask; mask <<= 1)
            if (ft[i].check_feat & mask && *ft[i].guest_feat & mask &&
                !(*ft[i].host_feat & mask)) {
                    unavailable_host_feature(&ft[i], mask);
                    rv = 1;
                }
    return rv;
}
505

    
506
/* Fill *x86_cpu_def from a "-cpu" specification of the form
 * "name[,option,...]" where each option is "+flag", "-flag",
 * "key=value", "check" or "enforce".  "host" (under KVM) copies the
 * host CPU.  Returns 0 on success, -1 on any parse or lookup failure.
 */
static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    char *s = strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0, plus_kvm_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0, minus_kvm_features = 0;
    uint32_t numvalue;

    /* bug fix: an empty model string (or one starting with ',') makes
     * strtok() return NULL, which was then passed to strcmp() below;
     * fail cleanly instead of crashing. */
    if (name == NULL)
        goto error;

    for (def = x86_defs; def; def = def->next)
        if (!strcmp(name, def->name))
            break;
    if (kvm_enabled() && strcmp(name, "host") == 0) {
        cpu_x86_fill_host(x86_cpu_def);
    } else if (!def) {
        goto error;
    } else {
        memcpy(x86_cpu_def, def, sizeof(*def));
    }

    plus_kvm_features = ~0; /* not supported bits will be filtered out later */

    /* the guest always sees the hypervisor bit */
    add_flagname_to_bitmaps("hypervisor", &plus_features,
        &plus_ext_features, &plus_ext2_features, &plus_ext3_features,
        &plus_kvm_features);

    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features, &plus_kvm_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features, &minus_kvm_features);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0; val++;
            if (!strcmp(featurestr, "family")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = numvalue;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err || numvalue > 0xff) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = numvalue;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err || numvalue > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = numvalue;
            } else if (!strcmp(featurestr, "level")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->level = numvalue;
            } else if (!strcmp(featurestr, "xlevel")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                /* values below 0x80000000 are taken as offsets into the
                 * extended leaf range */
                if (numvalue < 0x80000000) {
                        numvalue += 0x80000000;
                }
                x86_cpu_def->xlevel = numvalue;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                /* pack the 12-char vendor string into three words */
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
                for(i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i    ]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
                x86_cpu_def->vendor_override = 1;
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else if (!strcmp(featurestr, "check")) {
            check_cpuid = 1;
        } else if (!strcmp(featurestr, "enforce")) {
            check_cpuid = enforce_cpuid = 1;
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    /* apply "+flag" additions first, then "-flag" removals win */
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->kvm_features |= plus_kvm_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    x86_cpu_def->kvm_features &= ~minus_kvm_features;
    if (check_cpuid) {
        if (check_features_against_host(x86_cpu_def) && enforce_cpuid)
            goto error;
    }
    free(s);
    return 0;

error:
    free(s);
    return -1;
}
639

    
640
/* Write into buf a space-separated list of the flag names in featureset
 * selected by fbits, scanning bit 31 down to bit 0.  If flags is
 * nonzero, unnamed bits are skipped; otherwise they are rendered as
 * "[bit]".  If the output would overflow bufsize it ends with "...".
 */
static void listflags(char *buf, int bufsize, uint32_t fbits,
    const char **featureset, uint32_t flags)
{
    char *trunc, *out;
    int bit, nc;

    /* reserve the last 4 bytes for a "..." truncation marker */
    trunc = bufsize >= 4 ? buf + (bufsize -= 3) - 1 : NULL;
    out = buf;
    *buf = '\0';
    for (bit = 31; fbits && bufsize; fbits &= ~(1u << bit), --bit) {
        if ((fbits & (1u << bit)) && (featureset[bit] || !flags)) {
            if (featureset[bit])
                nc = snprintf(out, bufsize, "%s%s", out == buf ? "" : " ",
                              featureset[bit]);
            else
                nc = snprintf(out, bufsize, "%s[%d]", out == buf ? "" : " ",
                              bit);
            if (nc >= bufsize) {
                /* out of room: mark truncation and stop */
                if (trunc)
                    memcpy(trunc, "...", sizeof("..."));
                return;
            }
            out += nc;
            bufsize -= nc;
        }
    }
}
669

    
670
/* generate CPU information:
 * -?        list model names
 * -?model   list model names/IDs
 * -?dump    output all model (x86_def_t) data
 * -?cpuid   list all recognized cpuid flag names
 */
void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                  const char *optarg)
{
    unsigned char model = !strcmp("?model", optarg);
    unsigned char dump = !strcmp("?dump", optarg);
    unsigned char cpuid = !strcmp("?cpuid", optarg);
    x86_def_t *def;
    char buf[256];

    if (cpuid) {
        /* list every named flag of each feature word; the final 1 asks
         * listflags() to skip unnamed bits */
        (*cpu_fprintf)(f, "Recognized CPUID flags:\n");
        listflags(buf, sizeof (buf), (uint32_t)~0, feature_name, 1);
        (*cpu_fprintf)(f, "  f_edx: %s\n", buf);
        listflags(buf, sizeof (buf), (uint32_t)~0, ext_feature_name, 1);
        (*cpu_fprintf)(f, "  f_ecx: %s\n", buf);
        listflags(buf, sizeof (buf), (uint32_t)~0, ext2_feature_name, 1);
        (*cpu_fprintf)(f, "  extf_edx: %s\n", buf);
        listflags(buf, sizeof (buf), (uint32_t)~0, ext3_feature_name, 1);
        (*cpu_fprintf)(f, "  extf_ecx: %s\n", buf);
        return;
    }
    for (def = x86_defs; def; def = def->next) {
        /* models with nonzero def->flags are shown bracketed */
        snprintf(buf, sizeof (buf), def->flags ? "[%s]": "%s", def->name);
        if (model || dump) {
            (*cpu_fprintf)(f, "x86 %16s  %-48s\n", buf, def->model_id);
        } else {
            (*cpu_fprintf)(f, "x86 %16s\n", buf);
        }
        if (dump) {
            /* reassemble the 12-char vendor string from its three words */
            memcpy(buf, &def->vendor1, sizeof (def->vendor1));
            memcpy(buf + 4, &def->vendor2, sizeof (def->vendor2));
            memcpy(buf + 8, &def->vendor3, sizeof (def->vendor3));
            buf[12] = '\0';
            (*cpu_fprintf)(f,
                "  family %d model %d stepping %d level %d xlevel 0x%x"
                " vendor \"%s\"\n",
                def->family, def->model, def->stepping, def->level,
                def->xlevel, buf);
            listflags(buf, sizeof (buf), def->features, feature_name, 0);
            (*cpu_fprintf)(f, "  feature_edx %08x (%s)\n", def->features,
                buf);
            listflags(buf, sizeof (buf), def->ext_features, ext_feature_name,
                0);
            (*cpu_fprintf)(f, "  feature_ecx %08x (%s)\n", def->ext_features,
                buf);
            listflags(buf, sizeof (buf), def->ext2_features, ext2_feature_name,
                0);
            (*cpu_fprintf)(f, "  extfeature_edx %08x (%s)\n",
                def->ext2_features, buf);
            listflags(buf, sizeof (buf), def->ext3_features, ext3_feature_name,
                0);
            (*cpu_fprintf)(f, "  extfeature_ecx %08x (%s)\n",
                def->ext3_features, buf);
            (*cpu_fprintf)(f, "\n");
        }
    }
}
733

    
734
/* Configure env's CPUID state from the "-cpu" model string.
 * Returns 0 on success, -1 if the model cannot be parsed or found.
 */
static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        /* model carries no vendor id: default to Intel */
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_vendor_override = def->vendor_override;
    env->cpuid_level = def->level;
    /* encode family/model/stepping in CPUID leaf 1 EAX layout, spilling
     * large values into the extended family/model fields */
    if (def->family > 0x0f)
        env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
    else
        env->cpuid_version = def->family << 8;
    env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
    env->cpuid_version |= def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    /* bug fix: ext3 (0x80000001 ECX) flags were parsed, defaulted by the
     * built-in models and checked against the host, but never copied
     * into the CPU state, so e.g. "+lahf_lm" silently had no effect. */
    env->cpuid_ext3_features = def->ext3_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_kvm_features = def->kvm_features;
    {
        /* pack the brand string into the 12 cpuid_model words, NUL
         * padded when shorter than 48 chars */
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
    return 0;
}
779

    
780
#if !defined(CONFIG_USER_ONLY)
781
/* Copy up to four chars of the vendor id string s into the 32-bit word
 * *id, NUL padding when s is shorter than four chars.
 */
static void cpyid(const char *s, uint32_t *id)
{
    char *dst = (char *)id;
    size_t n;

    for (n = 0; n < sizeof(*id); n++) {
        if (*s)
            *dst++ = *s++;
        else
            *dst++ = '\0';  /* pad once the source is exhausted */
    }
}
791

    
792
/* interpret radix and convert from string to arbitrary scalar,
 * otherwise flag failure
 *
 * strtoul() with base 0 accepts decimal, 0-prefixed octal and
 * 0x-prefixed hex.  *pval is written only when the whole string parses
 * cleanly (non-empty, no trailing garbage); otherwise *perr is set to 1
 * (it is never cleared, so errors accumulate across calls).
 *
 * NOTE: deliberately a bare brace block, not do { } while (0) -- the
 * call sites in cpudef_setfield() invoke this macro without a trailing
 * semicolon and rely on this statement form.
 */
#define setscalar(pval, str, perr)                      \
{                                                       \
    char *pend;                                         \
    unsigned long ul;                                   \
                                                        \
    ul = strtoul(str, &pend, 0);                        \
    *str && !*pend ? (*pval = ul) : (*perr = 1);        \
}
803

    
804
/* map cpuid options to feature bits, otherwise return failure
 * (option tags in *str are delimited by whitespace)
 *
 * pval:       feature bitmask that matched flags are OR'd into
 * str:        whitespace-separated list of feature names
 * featureset: name table to match against (e.g. feature_name[])
 * perr:       set to 1 on the first unrecognised name; not cleared on
 *             success
 */
static void setfeatures(uint32_t *pval, const char *str,
    const char **featureset, int *perr)
{
    const char *p, *q;

    /* q marks the start of the current token, p scans just past its
       end; the loop exits when both land on the terminating NUL */
    for (q = p = str; *p || *q; q = p) {
        while (iswhite(*p))
            q = ++p; 
        while (*p && !iswhite(*p))
            ++p;
        /* only trailing whitespace was left: done, no error */
        if (!*q && !*p)
            return;
        /* lookup_feature() sets the matching bit in *pval on success */
        if (!lookup_feature(pval, q, p, featureset)) {
            fprintf(stderr, "error: feature \"%.*s\" not available in set\n",
                (int)(p - q), q);
            *perr = 1;
            return;
        }
    }
}
827

    
828
/* map config file options to x86_def_t form
829
 */
830
static int cpudef_setfield(const char *name, const char *str, void *opaque)
831
{
832
    x86_def_t *def = opaque;
833
    int err = 0;
834

    
835
    if (!strcmp(name, "name")) {
836
        def->name = strdup(str);
837
    } else if (!strcmp(name, "model_id")) {
838
        strncpy(def->model_id, str, sizeof (def->model_id));
839
    } else if (!strcmp(name, "level")) {
840
        setscalar(&def->level, str, &err)
841
    } else if (!strcmp(name, "vendor")) {
842
        cpyid(&str[0], &def->vendor1);
843
        cpyid(&str[4], &def->vendor2);
844
        cpyid(&str[8], &def->vendor3);
845
    } else if (!strcmp(name, "family")) {
846
        setscalar(&def->family, str, &err)
847
    } else if (!strcmp(name, "model")) {
848
        setscalar(&def->model, str, &err)
849
    } else if (!strcmp(name, "stepping")) {
850
        setscalar(&def->stepping, str, &err)
851
    } else if (!strcmp(name, "feature_edx")) {
852
        setfeatures(&def->features, str, feature_name, &err);
853
    } else if (!strcmp(name, "feature_ecx")) {
854
        setfeatures(&def->ext_features, str, ext_feature_name, &err);
855
    } else if (!strcmp(name, "extfeature_edx")) {
856
        setfeatures(&def->ext2_features, str, ext2_feature_name, &err);
857
    } else if (!strcmp(name, "extfeature_ecx")) {
858
        setfeatures(&def->ext3_features, str, ext3_feature_name, &err);
859
    } else if (!strcmp(name, "xlevel")) {
860
        setscalar(&def->xlevel, str, &err)
861
    } else {
862
        fprintf(stderr, "error: unknown option [%s = %s]\n", name, str);
863
        return (1);
864
    }
865
    if (err) {
866
        fprintf(stderr, "error: bad option value [%s = %s]\n", name, str);
867
        return (1);
868
    }
869
    return (0);
870
}
871

    
872
/* register config file entry as x86_def_t
 *
 * Callback for qemu_opts_foreach(): allocates a zero-initialised
 * definition, fills it field by field via cpudef_setfield(), and pushes
 * it onto the global x86_defs list.  Always returns 0.
 *
 * NOTE(review): the return value of qemu_opt_foreach() is ignored here,
 * so a definition containing a bad field is still registered -- confirm
 * this is intentional.  The allocation is never freed; definitions live
 * for the lifetime of the process.
 */
static int cpudef_register(QemuOpts *opts, void *opaque)
{
    x86_def_t *def = qemu_mallocz(sizeof (x86_def_t));

    qemu_opt_foreach(opts, cpudef_setfield, def, 1);
    def->next = x86_defs;
    x86_defs = def;
    return (0);
}
883
#endif /* !CONFIG_USER_ONLY */
884

    
885
/* register "cpudef" models defined in configuration file.  Here we first
886
 * preload any built-in definitions
887
 */
888
void x86_cpudef_setup(void)
889
{
890
    int i;
891

    
892
    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
893
        builtin_x86_defs[i].next = x86_defs;
894
        builtin_x86_defs[i].flags = 1;
895
        x86_defs = &builtin_x86_defs[i];
896
    }
897
#if !defined(CONFIG_USER_ONLY)
898
    qemu_opts_foreach(&qemu_cpudef_opts, cpudef_register, NULL, 0);
899
#endif
900
}
901

    
902
/* NOTE: must be called outside the CPU execute loop */
/* Reset env to the architectural power-on state: real mode, CS:IP at
 * the reset vector, FPU/SSE defaults, debug registers cleared. */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    /* zero only up to 'breakpoints': fields from there on (breakpoint /
       watchpoint lists, etc.) must survive a reset */
    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    /* CR0 reset value: CD|NW|ET|... with paging and protection off */
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS base 0xffff0000 + EIP 0xfff0 places execution at the
       architectural reset vector 0xfffffff0 */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    /* hardware puts the CPUID version word in EDX after reset */
    env->regs[R_EDX] = env->cpuid_version;

    /* only the always-set reserved bit 1 of EFLAGS */
    env->eflags = 0x2;

    /* FPU init */
    /* all x87 tag bits "empty", default control word (masked exceptions,
       64-bit precision, round-to-nearest) */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    /* SSE default: all exceptions masked */
    env->mxcsr = 0x1f80;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    /* drop any breakpoints/watchpoints installed by guest debug regs */
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);

    env->mcg_status = 0;
}
975

    
976
/* Free the CPU state structure.  The caller must not use env afterwards.
 * (Presumably pairs with the allocation in cpu_x86_init() -- confirm.) */
void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}
980

    
981
/***********************************************************/
982
/* x86 debug */
983

    
984
/* Printable names of the lazy condition-code operation states, indexed
 * by env->cc_op.  The entry order must stay in sync with the CC_OP_*
 * enumeration; cpu_dump_state() bounds-checks env->cc_op against
 * CC_OP_NB before indexing this table. */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
1038

    
1039
static void
1040
cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
1041
                       int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
1042
                       const char *name, struct SegmentCache *sc)
1043
{
1044
#ifdef TARGET_X86_64
1045
    if (env->hflags & HF_CS64_MASK) {
1046
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
1047
                    sc->selector, sc->base, sc->limit, sc->flags);
1048
    } else
1049
#endif
1050
    {
1051
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
1052
                    (uint32_t)sc->base, sc->limit, sc->flags);
1053
    }
1054

    
1055
    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
1056
        goto done;
1057

    
1058
    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
1059
    if (sc->flags & DESC_S_MASK) {
1060
        if (sc->flags & DESC_CS_MASK) {
1061
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
1062
                           ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
1063
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
1064
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
1065
        } else {
1066
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
1067
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
1068
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
1069
        }
1070
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
1071
    } else {
1072
        static const char *sys_type_name[2][16] = {
1073
            { /* 32 bit mode */
1074
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
1075
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
1076
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
1077
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
1078
            },
1079
            { /* 64 bit mode */
1080
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
1081
                "Reserved", "Reserved", "Reserved", "Reserved",
1082
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
1083
                "Reserved", "IntGate64", "TrapGate64"
1084
            }
1085
        };
1086
        cpu_fprintf(f, sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
1087
                                    [(sc->flags & DESC_TYPE_MASK)
1088
                                     >> DESC_TYPE_SHIFT]);
1089
    }
1090
done:
1091
    cpu_fprintf(f, "\n");
1092
}
1093

    
1094
/* Dump the full architectural CPU state (GPRs, EFLAGS, segments,
 * descriptor tables, control and debug registers) to f via cpu_fprintf.
 * flags may include X86_DUMP_CCOP (lazy condition-code state) and
 * X86_DUMP_FPU (x87/SSE state).
 */
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    /* make sure env reflects the current state when running under KVM */
    cpu_synchronize_state(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        /* BUG FIX: env->dr[] is target_ulong (64-bit on x86_64 targets)
         * but was passed uncast to %08x -- a varargs type mismatch; cast
         * to uint32_t like the CR prints above. */
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%08x ", i, (uint32_t)env->dr[i]);
        cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n",
                    (uint32_t)env->dr[6], (uint32_t)env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        /* print the lazy flags state; fall back to the raw number for
           out-of-range cc_op values */
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        /* rebuild the packed x87 tag word: 1 bit per valid register */
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            /* print the raw 80-bit extended-precision bit pattern */
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        /* 16 XMM registers in 64-bit mode, 8 otherwise */
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}
1278

    
1279
/***********************************************************/
1280
/* x86 mmu */
1281
/* XXX: add PGE support */
1282

    
1283
/* Set the emulated A20 gate to a20_state (treated as a boolean).
 * A no-op when the gate is already in the requested state. */
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    int new_state = (a20_state != 0);

    if (new_state == ((env->a20_mask >> 20) & 1)) {
        return;
    }
#if defined(DEBUG_MMU)
    printf("A20 update: a20=%d\n", new_state);
#endif
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

    /* when a20 is changed, all the MMU mappings are invalid, so
       we must flush everything */
    tlb_flush(env, 1);
    env->a20_mask = ~(1 << 20) | (new_state << 20);
}
1300

    
1301
/* Install new_cr0 as the guest CR0, updating the derived hflags and
 * handling long-mode entry/exit on 64-bit targets. */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* any change to a paging-related bit (PG/WP/PE) invalidates all
       cached translations */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        /* enabling paging with EFER.LME requires PAE; bail out (and
           leave CR0 unchanged) when it is missing */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    /* CR0.ET is hardwired to 1 */
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    /* CR0.MP/EM/TS are transferred into HF_MP/EM/TS with one shift;
       this relies on the hflags bit layout mirroring CR0's */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
1341

    
1342
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
1343
   the PDPT */
1344
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
1345
{
1346
    env->cr[3] = new_cr3;
1347
    if (env->cr[0] & CR0_PG_MASK) {
1348
#if defined(DEBUG_MMU)
1349
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
1350
#endif
1351
        tlb_flush(env, 0);
1352
    }
1353
}
1354

    
1355
/* Install new_cr4, flushing the TLB when a paging-related bit changes
 * and tracking OSFXSR in the hidden flags. */
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
    const uint32_t paging_bits = CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK;

#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    /* PGE/PAE/PSE affect translation: any change invalidates the TLB */
    if ((new_cr4 ^ env->cr[4]) & paging_bits) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    if (new_cr4 & CR4_OSFXSR_MASK) {
        env->hflags |= HF_OSFXSR_MASK;
    } else {
        env->hflags &= ~HF_OSFXSR_MASK;
    }

    env->cr[4] = new_cr4;
}
1374

    
1375
#if defined(CONFIG_USER_ONLY)
1376

    
1377
/* User-mode-only emulation: there is no MMU, so every fault is simply
 * reported back as a user-level page fault on the accessed address.
 * Always returns 1 (raise #PF). */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    env->cr[2] = addr;
    env->error_code = ((is_write & 1) << PG_ERROR_W_BIT) | PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
1388

    
1389
/* User-mode-only emulation: no translation, guest virtual addresses
 * map 1:1 onto "physical" addresses. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
1393

    
1394
#else
1395

    
1396
/* XXX: This value should match the one returned by CPUID
1397
 * and in exec.c */
1398
# if defined(TARGET_X86_64)
1399
# define PHYS_ADDR_MASK 0xfffffff000LL
1400
# else
1401
# define PHYS_ADDR_MASK 0xffffff000LL
1402
# endif
1403

    
1404
/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block

   Walks the guest page tables for addr, updating accessed/dirty bits as
   real hardware would, and installs the resulting translation in the
   TLB.  Three table layouts are handled: 4-level long mode, 3-level
   PAE, and legacy 2-level 32-bit paging.  Throughout the walk, ptep
   accumulates the combined access rights (user/rw/nx) of every level.
   NOTE(review): is_write1 == 2 appears to denote an instruction fetch
   (it drives the NX check and PG_ERROR_I_D_MASK) -- confirm against the
   callers. */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    /* paging disabled: identity mapping with full permissions */
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            /* long mode: 4-level walk starting at the PML4 */
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            /* non-canonical addresses raise #GP, not #PF */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* NX set while EFER.NXE is off is a reserved-bit violation */
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            /* invert NX so that AND-combining across levels keeps the
               most restrictive (any level with NX set clears the bit) */
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* legacy PAE: 3-level walk; the 4-entry PDPT has no
               permission bits, so start with full rights */
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            /* undo the inversion: ptep now holds real NX polarity */
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                /* supervisor writes honour RW only when CR0.WP is set */
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        /* legacy 32-bit 2-level paging; NX does not exist here */
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    /* build the #PF error code from the access attributes */
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
1699

    
1700
/* Translate a guest virtual address to a guest physical address by
   walking the page tables in software, for debugger use only: no TLB
   fill, no access/dirty bit updates, no fault injection.
   Returns the physical address, or -1 if the address is not mapped
   (non-present entry or non-canonical long-mode address). */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        /* PAE (and long-mode) paging: 64-bit entries. */
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            /* 4-level long-mode walk: PML4E -> PDPE -> PDE -> PTE. */
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;  /* non-canonical address */

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            /* 32-bit PAE: CR3 points to the 4-entry PDPT (32-byte aligned). */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        /* Legacy 32-bit paging (or paging disabled): 32-bit entries. */
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            /* Paging disabled: identity mapping. */
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                /* 4 MB page (PSE). */
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    /* Combine the page frame with the in-page offset for the given
       page size. */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
1793

    
1794
/* Install the hardware breakpoint described by DR7/DRi (i == index)
   into QEMU's generic breakpoint/watchpoint machinery.  On failure the
   cached pointer for this slot is cleared. */
void hw_breakpoint_insert(CPUState *env, int index)
{
    int bp_kind = hw_breakpoint_type(env->dr[7], index);
    int rc = 0;

    if (bp_kind == 0) {
        /* Execution breakpoint: only insert if enabled in DR7. */
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            rc = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                       &env->cpu_breakpoint[index]);
        }
    } else if (bp_kind == 1 || bp_kind == 3) {
        /* Data breakpoint: write-only (1) or read/write access (3). */
        int wp_flags = BP_CPU | (bp_kind == 1 ? BP_MEM_WRITE : BP_MEM_ACCESS);

        rc = cpu_watchpoint_insert(env, env->dr[index],
                                   hw_breakpoint_len(env->dr[7], index),
                                   wp_flags, &env->cpu_watchpoint[index]);
    }
    /* bp_kind == 2 is an I/O breakpoint: not supported yet. */

    if (rc) {
        env->cpu_breakpoint[index] = NULL;
    }
}
1821

    
1822
/* Undo hw_breakpoint_insert() for slot index.  A NULL cached pointer
   means nothing was ever installed for this slot. */
void hw_breakpoint_remove(CPUState *env, int index)
{
    int bp_kind;

    if (env->cpu_breakpoint[index] == NULL) {
        return;
    }

    bp_kind = hw_breakpoint_type(env->dr[7], index);
    if (bp_kind == 0) {
        /* Execution breakpoint: only present if it was enabled. */
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        }
    } else if (bp_kind == 1 || bp_kind == 3) {
        /* Data watchpoint (write-only or access). */
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
    }
    /* bp_kind == 2 is an I/O breakpoint: nothing was installed. */
}
1840

    
1841
/* Scan the four debug registers for breakpoints/watchpoints that have
   hit, computing the new DR6 bits 0-3.  DR6 is written back if an
   enabled breakpoint hit, or unconditionally when force_dr6_update is
   set.  Returns nonzero iff an *enabled* breakpoint was hit. */
int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong new_dr6 = env->dr[6] & ~0xf;
    int hit_enabled = 0;
    int i;

    for (i = 0; i < 4; i++) {
        int bp_kind = hw_breakpoint_type(env->dr[7], i);
        int hit;

        if (bp_kind == 0) {
            /* Execution breakpoint: compare against current EIP. */
            hit = (env->dr[i] == env->eip);
        } else {
            /* Data breakpoint kinds (1 and 3) have bit 0 set; check
               whether the corresponding watchpoint fired. */
            hit = (bp_kind & 1) && env->cpu_watchpoint[i] &&
                  (env->cpu_watchpoint[i]->flags & BP_WATCHPOINT_HIT);
        }
        if (hit) {
            new_dr6 |= 1 << i;
            if (hw_breakpoint_enabled(env->dr[7], i)) {
                hit_enabled = 1;
            }
        }
    }

    if (hit_enabled || force_dr6_update) {
        env->dr[6] = new_dr6;
    }
    return hit_enabled;
}
1862

    
1863
/* Debug-exception handler that was installed before ours; saved in
   cpu_x86_init() so breakpoint_handler() can chain to it. */
static CPUDebugExcpHandler *prev_debug_excp_handler;

/* Raises a CPU exception from C code (defined elsewhere in the
   target code -- prototype only here). */
void raise_exception_env(int exception_index, CPUState *env);
1866

    
1867
/* Debug-exception hook installed via cpu_set_debug_excp_handler().
   Turns guest-owned (BP_CPU) breakpoint/watchpoint hits into a #DB
   exception for the guest; anything else is left for the previously
   installed handler (e.g. the gdbstub), which is chained at the end. */
static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        /* A data watchpoint fired; only handle guest hardware ones. */
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception_env(EXCP01_DB, env);
            else
                /* Not an enabled DR7 breakpoint after all: restart
                   execution at the current instruction. */
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        /* Execution breakpoint: look up the one matching EIP. */
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    /* Guest hardware breakpoint: update DR6 and
                       deliver #DB. */
                    check_hw_breakpoints(env, 1);
                    raise_exception_env(EXCP01_DB, env);
                }
                break;
            }
    }
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}
1892

    
1893
/* This should come from sysemu.h - if we could include it here...
   Requests a full system reset; used below to model a triple fault. */
void qemu_system_reset_request(void);
1895

    
1896
/* Inject a machine-check event into MCA bank 'bank' of vcpu cenv.
   status/mcg_status/addr/misc are the raw MCA register values to
   record.  The injection is silently dropped when the bank index is
   out of range, the event is not valid, or uncorrected-error
   reporting is disabled via MCG_CTL / MCi_CTL.  An uncorrected error
   that cannot be delivered (MCIP already set, or CR4.MCE clear)
   triggers a system reset, modelling a shutdown/triple fault. */
void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc)
{
    uint64_t mcg_cap = cenv->mcg_cap;
    unsigned bank_num = mcg_cap & 0xff;  /* low byte of MCG_CAP = bank count */
    uint64_t *banks = cenv->mce_banks;

    if (bank >= bank_num || !(status & MCI_STATUS_VAL))
        return;

    /*
     * if MSR_MCG_CTL is not all 1s, the uncorrected error
     * reporting is disabled
     */
    if ((status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
        cenv->mcg_ctl != ~(uint64_t)0)
        return;
    banks += 4 * bank;  /* 4 MSRs per bank: CTL, STATUS, ADDR, MISC */
    /*
     * if MSR_MCi_CTL is not all 1s, the uncorrected error
     * reporting is disabled for the bank
     */
    if ((status & MCI_STATUS_UC) && banks[0] != ~(uint64_t)0)
        return;
    if (status & MCI_STATUS_UC) {
        /* Uncorrected error while a previous one is still in progress,
           or with machine-check exceptions disabled: unrecoverable. */
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            fprintf(stderr, "injects mce exception while previous "
                    "one is in progress!\n");
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;  /* bank already held an event */
        banks[2] = addr;                /* MCi_ADDR */
        banks[3] = misc;                /* MCi_MISC */
        cenv->mcg_status = mcg_status;
        banks[1] = status;              /* MCi_STATUS */
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        /* Corrected error: record it only if the bank is free or holds
           another corrected event; flag overflow when overwriting. */
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        banks[1] = status;
    } else
        /* Bank busy with an uncorrected event: just note the loss. */
        banks[1] |= MCI_STATUS_OVER;
}
1946
#endif /* !CONFIG_USER_ONLY */
1947

    
1948
/* Arm the machine-check architecture state for a freshly created vcpu:
   advertise the default capabilities and enable error reporting in
   every bank.  Only done for family >= 6 CPU models that expose both
   the MCE and MCA CPUID feature bits. */
static void mce_init(CPUX86State *cenv)
{
    unsigned int family = (cenv->cpuid_version >> 8) & 0xf;
    unsigned int i;

    if (family < 6 ||
        (cenv->cpuid_features & (CPUID_MCE | CPUID_MCA)) !=
            (CPUID_MCE | CPUID_MCA)) {
        return;
    }

    cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
    cenv->mcg_ctl = ~(uint64_t)0;       /* all-ones: reporting enabled */
    for (i = 0; i < MCE_BANKS_DEF; i++) {
        /* MCi_CTL (first of the 4 per-bank MSRs) = all ones. */
        cenv->mce_banks[i * 4] = ~(uint64_t)0;
    }
}
1961

    
1962
/* Execute CPUID on the *host* CPU and return the resulting registers.
   Each output pointer may be NULL if the caller does not need that
   register.  Only compiled to real work under CONFIG_KVM; otherwise
   the outputs are left untouched. */
static void host_cpuid(uint32_t function, uint32_t count,
                       uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#else
    /* 32-bit variant: save/restore all registers with pusha/popa and
       store the results through %esi -- presumably to avoid declaring
       %ebx (which may be the PIC register) as clobbered. */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
#endif
}
1996

    
1997
/* Fill ebx/ecx/edx with the CPUID vendor string of the emulated CPU
   (leaf 0 register order: EBX, EDX, ECX), optionally overridden by the
   host CPU's vendor when running under KVM. */
static void get_cpuid_vendor(CPUX86State *env, uint32_t *ebx,
                             uint32_t *ecx, uint32_t *edx)
{
    *ebx = env->cpuid_vendor1;
    *ecx = env->cpuid_vendor3;
    *edx = env->cpuid_vendor2;

    /* sysenter isn't supported on compatibility mode on AMD, syscall
     * isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual cpu vendor, but you can override
     * this if you want to use KVM's sysenter/syscall emulation
     * in compatibility mode and when doing cross vendor migration
     */
    if (kvm_enabled() && env->cpuid_vendor_override) {
        host_cpuid(0, 0, NULL, ebx, ecx, edx);
    }
}
2014

    
2015
/* Emulate the CPUID instruction: compute eax/ebx/ecx/edx for leaf
   'index' and sub-leaf 'count' from the configured model in env.
   All four output pointers must be non-NULL. */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    /* test if maximum index reached */
    if (index & 0x80000000) {
        /* Out-of-range extended leaf: fall back to the highest basic
           leaf, like real hardware does. */
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Highest basic leaf + vendor string. */
        *eax = env->cpuid_level;
        get_cpuid_vendor(env, ebx, ecx, edx);
        break;
    case 1:
        /* Version, APIC ID, feature flags. */
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;
        if (env->nr_cores * env->nr_threads > 1) {
            /* Advertise the logical processor count and HTT for SMP. */
            *ebx |= (env->nr_cores * env->nr_threads) << 16;
            *edx |= 1 << 28;    /* HTT bit */
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        *eax = 1;
        *ebx = 0;
        *ecx = 0;
        *edx = 0x2c307d;
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (env->nr_cores > 1) {
                *eax = (env->nr_cores - 1) << 26;
        } else {
                *eax = 0;
        }
        switch (count) {
            case 0: /* L1 dcache info */
                *eax |= 0x0000121;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 1: /* L1 icache info */
                *eax |= 0x0000122;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 2: /* L2 cache info */
                *eax |= 0x0000143;
                if (env->nr_threads > 1) {
                    *eax |= (env->nr_threads - 1) << 14;
                }
                *ebx = 0x3c0003f;
                *ecx = 0x0000fff;
                *edx = 0x0000001;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        /* Highest extended leaf + vendor string. */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        /* Extended version and feature flags. */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So dont set it here for Intel to make Linux guests happy.
         */
        if (env->nr_cores * env->nr_threads > 1) {
            uint32_t tebx, tecx, tedx;
            get_cpuid_vendor(env, &tebx, &tecx, &tedx);
            if (tebx != CPUID_VENDOR_INTEL_1 ||
                tedx != CPUID_VENDOR_INTEL_2 ||
                tecx != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }

        if (kvm_enabled()) {
            /* Nested SVM not yet supported in upstream QEMU */
            *ecx &= ~CPUID_EXT3_SVM;
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string: 16 bytes per leaf over three leaves. */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        *eax = 0x01ff01ff;
        *ebx = 0x01ff01ff;
        *ecx = 0x40020140;
        *edx = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        *eax = 0;
        *ebx = 0x42004200;
        *ecx = 0x02008140;
        *edx = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
            /* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028;        /* 48 bits virtual, 40 bits physical */
        } else {
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (env->nr_cores * env->nr_threads > 1) {
            *ecx |= (env->nr_cores * env->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        *eax = 0x00000001; /* SVM Revision */
        *ebx = 0x00000010; /* nr of ASIDs */
        *ecx = 0;
        *edx = 0; /* optional features */
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
2203

    
2204

    
2205
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
2206
                            target_ulong *base, unsigned int *limit,
2207
                            unsigned int *flags)
2208
{
2209
    SegmentCache *dt;
2210
    target_ulong ptr;
2211
    uint32_t e1, e2;
2212
    int index;
2213

    
2214
    if (selector & 0x4)
2215
        dt = &env->ldt;
2216
    else
2217
        dt = &env->gdt;
2218
    index = selector & ~7;
2219
    ptr = dt->base + index;
2220
    if ((index + 7) > dt->limit
2221
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
2222
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
2223
        return 0;
2224

    
2225
    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
2226
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
2227
    if (e2 & DESC_G_MASK)
2228
        *limit = (*limit << 12) | 0xfff;
2229
    *flags = e2;
2230

    
2231
    return 1;
2232
}
2233

    
2234
/* Allocate and initialise a CPUX86State for the CPU model named by
   cpu_model.  Returns NULL (after closing the partial state) if the
   model name is unknown.  Process-wide one-time setup (flag
   optimisation tables, debug-exception hook) runs on the first call
   only. */
CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;  /* one-shot guard for the global init below */

    env = qemu_mallocz(sizeof(CPUX86State));
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        /* Chain our debug-exception handler in front of any existing
           one (saved for delegation in breakpoint_handler). */
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    mce_init(env);

    qemu_init_vcpu(env);

    return env;
}
2262

    
2263
#if !defined(CONFIG_USER_ONLY)
/* Handle an INIT IPI: reset the vcpu and its local APIC while
   preserving a pending SIPI so the startup sequence can complete. */
void do_cpu_init(CPUState *env)
{
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    cpu_reset(env);
    env->interrupt_request = sipi;
    apic_init_reset(env);
}

/* Handle a STARTUP IPI by forwarding it to the local APIC. */
void do_cpu_sipi(CPUState *env)
{
    apic_sipi(env);
}
#else
/* User-mode emulation has no APIC: INIT/SIPI are no-ops. */
void do_cpu_init(CPUState *env)
{
}
void do_cpu_sipi(CPUState *env)
{
}
#endif