Statistics
| Branch: | Revision:

root / target-i386 / helper.c @ 640f42e4

History | View | Annotate | Download (55.7 kB)

1
/*
2
 *  i386 helpers (without register variable usage)
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
19
 */
20
#include <stdarg.h>
21
#include <stdlib.h>
22
#include <stdio.h>
23
#include <string.h>
24
#include <inttypes.h>
25
#include <signal.h>
26
#include <assert.h>
27

    
28
#include "cpu.h"
29
#include "exec-all.h"
30
#include "qemu-common.h"
31
#include "kvm.h"
32

    
33
//#define DEBUG_MMU
34

    
35
/* Search one 32-entry CPUID name table for "flagname"; on a match set the
 * corresponding bit in *dest and return 1, otherwise return 0.  NULL table
 * entries are reserved bits with no user-settable name. */
static int lookup_feature_bit(const char *flagname, const char **names,
                              uint32_t *dest)
{
    int i;

    for (i = 0; i < 32; i++) {
        if (names[i] && !strcmp(flagname, names[i])) {
            *dest |= 1u << i;
            return 1;
        }
    }
    return 0;
}

/* Set the bit for CPUID feature "flagname" (as given on the -cpu command
 * line) in whichever of the four feature words declares that name:
 *   features      - CPUID function 1 EDX bits
 *   ext_features  - CPUID function 1 ECX bits
 *   ext2_features - CPUID function 0x80000001 EDX bits
 *   ext3_features - CPUID function 0x80000001 ECX bits
 * An unknown name is reported on stderr and otherwise ignored. */
static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    /* feature flags taken from "Intel Processor Identification and the CPUID
     * Instruction" and AMD's "CPUID Specification". In cases of disagreement
     * about feature names, the Linux name is used. */
    static const char *feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
    };
    static const char *ext_feature_name[] = {
       "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
       "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
       NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
       NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    static const char *ext2_feature_name[] = {
       "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
       "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
       "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
       "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
    };
    static const char *ext3_feature_name[] = {
       "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
       "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
       NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
       NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };

    /* The tables are searched in CPUID order; note that some names (e.g.
       "fpu", "mmx") appear in both the standard and extended EDX tables, so
       only the standard bit is set for those. */
    if (lookup_feature_bit(flagname, feature_name, features))
        return;
    if (lookup_feature_bit(flagname, ext_feature_name, ext_features))
        return;
    if (lookup_feature_bit(flagname, ext2_feature_name, ext2_features))
        return;
    if (lookup_feature_bit(flagname, ext3_feature_name, ext3_features))
        return;
    fprintf(stderr, "CPU feature %s not found\n", flagname);
}
91

    
92
/* Static description of one built-in CPU model, selectable with -cpu.
   Filled into the CPUX86State CPUID fields by cpu_x86_register(). */
typedef struct x86_def_t {
    const char *name;                   /* model name accepted by -cpu */
    uint32_t level;                     /* max standard CPUID function */
    uint32_t vendor1, vendor2, vendor3; /* CPUID vendor id words; all zero
                                           means "use the Intel default"
                                           (see cpu_x86_register) */
    int family;                         /* CPUID version: family */
    int model;                          /* CPUID version: model */
    int stepping;                       /* CPUID version: stepping */
    /* feature words: fn 1 EDX, fn 1 ECX, fn 0x80000001 EDX/ECX */
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;                    /* max extended CPUID function */
    char model_id[48];                  /* brand string, CPUID 0x80000002..4 */
} x86_def_t;
103

    
104
/* Baseline CPUID function-1 EDX feature sets used by the built-in models
   below.  Each Pentium generation extends the previous one; PPRO_FEATURES
   is the common base for the "modern" models (qemu32/64, core2duo, ...). */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
115
/* Table of the built-in CPU models, searched by name in
   cpu_x86_find_by_name().  The 64-bit-only models are compiled in only for
   TARGET_X86_64 builds.  The "& 0x0183F3FF" masks reuse PPRO_FEATURES for
   the extended (0x80000001 EDX) word by keeping only the bits that exist in
   both words. */
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "phenom",
        .level = 5,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 16,
        .model = 2,
        .stepping = 3,
        /* Missing: CPUID_VME, CPUID_HT */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* Missing: CPUID_EXT_CX16, CPUID_EXT_POPCNT */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
    },
    {
        .name = "core2duo",
        .level = 10,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* The original CPU also implements these features:
               CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* The original CPU also implements these ext features:
               CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
               CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU     T7700  @ 2.40GHz",
    },
#endif
    {
        .name = "qemu32",
        .level = 2,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3,
        .xlevel = 0,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "coreduo",
        .level = 10,
        .family = 6,
        .model = 14,
        .stepping = 8,
        /* The original CPU also implements these features:
               CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
        /* The original CPU also implements these ext features:
               CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
               CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .ext2_features = CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU           T2600  @ 2.16GHz",
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = 0x68747541, /* "Auth" */
        .vendor2 = 0x69746e65, /* "enti" */
        .vendor3 = 0x444d4163, /* "cAMD" */
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        /* XXX: put another string ? */
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "n270",
        /* original is on level 10 */
        .level = 5,
        .family = 6,
        .model = 28,
        .stepping = 2,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
            /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
             * CPUID_HT | CPUID_TM | CPUID_PBE */
            /* Some CPUs got no CPUID_SEP */
        .ext_features = CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
            /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
             * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270   @ 1.60GHz",
    },
};
292

    
293
/* Parse a -cpu model string of the form
       name[,+feature][,-feature][,key=value]...
   Looks "name" up in x86_defs[], copies the matching model into
   *x86_cpu_def, then applies the comma-separated modifiers:
       +flag / -flag      set/clear a CPUID feature bit (see
                          add_flagname_to_bitmaps)
       family=N, model=N, stepping=N, vendor=XXXXXXXXXXXX, model_id=S
   Returns 0 on success, -1 on an unknown model or malformed modifier.
   NOTE(review): uses strtok(), so this is not reentrant and must not be
   called while another strtok() scan is in progress. */
static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    /* work on a private copy: strtok() writes NULs into the string */
    char *s = strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
    int family = -1, model = -1, stepping = -1;

    def = NULL;
    for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
        if (strcmp(name, x86_defs[i].name) == 0) {
            def = &x86_defs[i];
            break;
        }
    }
    if (!def)
        goto error;
    memcpy(x86_cpu_def, def, sizeof(*def));

    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            /* "+flag": remember the bit; applied after all parsing */
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
        } else if ((val = strchr(featurestr, '='))) {
            /* "key=value": split at '=' in place */
            *val = 0; val++;
            if (!strcmp(featurestr, "family")) {
                char *err;
                family = strtol(val, &err, 10);
                /* reject empty values and trailing garbage */
                if (!*val || *err || family < 0) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = family;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                model = strtol(val, &err, 10);
                if (!*val || *err || model < 0 || model > 0xff) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = model;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                stepping = strtol(val, &err, 10);
                if (!*val || *err || stepping < 0 || stepping > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = stepping;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                /* pack the 12-char vendor string into three little-endian
                   32-bit words, 4 chars each */
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
                for(i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i    ]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    /* apply the accumulated +flags first, then the -flags, so that
       "-x,+x" nets out to the flag being set */
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    free(s);
    return 0;

error:
    free(s);
    return -1;
}
390

    
391
void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
392
{
393
    unsigned int i;
394

    
395
    for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
396
        (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
397
}
398

    
399
/* Resolve "cpu_model" (see cpu_x86_find_by_name) and load the resulting
   model definition into the CPUID-related fields of *env.
   Returns 0 on success, -1 if the model string could not be parsed. */
static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        /* models with no explicit vendor report as Intel */
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_level = def->level;
    /* Pack family/model/stepping into the CPUID function-1 EAX layout:
       families above 0xf use the extended-family field (bits 20-27) with
       the base family held at 0xf. */
    if (def->family > 0x0f)
        env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
    else
        env->cpuid_version = def->family << 8;
    /* model: low nibble in bits 4-7, extended model in bits 16-19 */
    env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
    env->cpuid_version |= def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;   /* power-on default PAT MSR value */
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
    {
        /* Pack the brand string into the cpuid_model words (4 chars per
           32-bit word, little endian), NUL-padded to 48 bytes.
           NOTE(review): model_id is an array member, so the NULL check is
           always false -- harmless, but dead code.  Also uses |=, which
           assumes cpuid_model starts zeroed -- confirm callers. */
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
    return 0;
}
443

    
444
/* NOTE: must be called outside the CPU execute loop */
/* Put the CPU into its architectural power-on/RESET state.  Everything up
   to the "breakpoints" member of CPUX86State is cleared; fields after it
   (including breakpoint/watchpoint lists) are preserved across reset. */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    /* zero all architectural state, keep host-side bookkeeping intact */
    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;   /* global interrupts enabled (SVM) */

    cpu_x86_update_cr0(env, 0x60000010);   /* CR0 reset value: CD|NW|ET */
    env->a20_mask = ~0x0;                  /* A20 gate open */
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);   /* LDT type */
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);   /* busy TSS */

    /* CS base 0xffff0000 + EIP 0xfff0 below = reset vector 0xfffffff0 */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;   /* EDX holds CPU signature at reset */

    env->eflags = 0x2;   /* only the always-1 reserved bit set */

    /* FPU init: all stack slots empty, default control word */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;   /* SSE default: all exceptions masked */

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    /* drop only CPU-generated breakpoints/watchpoints; debugger-set ones
       survive reset */
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
}
509

    
510
/* Destroy a CPU state object.  The env pointer must not be used afterwards.
   NOTE(review): presumably pairs with the allocation done at CPU creation
   (cpu_x86_init) -- confirm against the caller. */
void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}
514

    
515
/***********************************************************/
516
/* x86 debug */
517

    
518
/* Printable names for the lazy condition-code states, indexed by
   env->cc_op; used by cpu_dump_state() below.
   NOTE(review): entry order must match the CC_OP_* enum (presumably in
   cpu.h) -- keep the two lists in sync when editing either. */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
572

    
573
static void
574
cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
575
                       int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
576
                       const char *name, struct SegmentCache *sc)
577
{
578
#ifdef TARGET_X86_64
579
    if (env->hflags & HF_CS64_MASK) {
580
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
581
                    sc->selector, sc->base, sc->limit, sc->flags);
582
    } else
583
#endif
584
    {
585
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
586
                    (uint32_t)sc->base, sc->limit, sc->flags);
587
    }
588

    
589
    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
590
        goto done;
591

    
592
    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
593
    if (sc->flags & DESC_S_MASK) {
594
        if (sc->flags & DESC_CS_MASK) {
595
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
596
                           ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
597
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
598
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
599
        } else {
600
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
601
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
602
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
603
        }
604
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
605
    } else {
606
        static const char *sys_type_name[2][16] = {
607
            { /* 32 bit mode */
608
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
609
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
610
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
611
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
612
            },
613
            { /* 64 bit mode */
614
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
615
                "Reserved", "Reserved", "Reserved", "Reserved",
616
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
617
                "Reserved", "IntGate64", "TrapGate64"
618
            }
619
        };
620
        cpu_fprintf(f, sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
621
                                    [(sc->flags & DESC_TYPE_MASK)
622
                                     >> DESC_TYPE_SHIFT]);
623
    }
624
done:
625
    cpu_fprintf(f, "\n");
626
}
627

    
628
/* Dump the full architectural CPU state (GPRs, flags, segments, control and
   debug registers, and optionally condition-code internals and FPU/SSE
   state) to "f" via cpu_fprintf.  "flags" is a mask of X86_DUMP_CCOP and
   X86_DUMP_FPU selecting the optional sections.
   Fix: in the 32-bit branch the debug registers were passed to "%08x"
   without a cast; env->dr[] is target_ulong, which is 64-bit on x86_64
   target builds, so the varargs type did not match the format (undefined
   behavior per C99 fprintf).  Cast to uint32_t, as the CR prints in the
   same branch already do. */
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    /* under KVM the register state lives in the kernel; refresh env first */
    if (kvm_enabled())
        kvm_arch_get_registers(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%08x ", i, (uint32_t)env->dr[i]);
        cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n",
                    (uint32_t)env->dr[6], (uint32_t)env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        /* show the lazy condition-code machinery; fall back to a raw
           index if cc_op is out of the known range */
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        /* condense the per-register tags into the FXSAVE-style FTW byte
           (1 = valid) */
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            /* split the 80-bit register into its 64-bit mantissa and
               16-bit sign/exponent halves for printing */
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        /* 16 XMM registers in 64-bit mode, 8 otherwise */
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}
813

    
814
/***********************************************************/
815
/* x86 mmu */
816
/* XXX: add PGE support */
817

    
818
/* Update the emulated A20 gate state.  A change invalidates every
   cached translation, so the TLB is flushed and any translated code
   currently executing is unlinked. */
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    int new_state = (a20_state != 0);          /* normalize to 0/1 */
    int cur_state = (env->a20_mask >> 20) & 1; /* current gate bit */

    if (new_state == cur_state)
        return;
#if defined(DEBUG_MMU)
    printf("A20 update: a20=%d\n", new_state);
#endif
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

    /* when a20 is changed, all the MMU mappings are invalid, so
       we must flush everything */
    tlb_flush(env, 1);
    env->a20_mask = (~0x100000) | (new_state << 20);
}
835

    
836
/* Install a new CR0 value and keep derived CPU state consistent:
   flushes the TLB when paging/protection bits change, handles the
   long-mode (IA-32e) enter/exit transitions, and recomputes the
   hflags bits that cache CR0 state for the translator. */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* Any change to PG, WP or PE invalidates all cached translations. */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    /* Setting PG with LME enabled activates long mode (LMA);
       clearing PG while LMA is set deactivates it. */
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    /* CR0.ET is hardwired to 1. */
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags: CR0.MP/EM/TS map onto HF_MP/HF_EM/HF_TS with
       a single shift, since the mask bits are laid out in the same
       relative order. */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
876

    
877
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
878
   the PDPT */
879
/* Install a new page-table base (CR3).  When paging is active the
   cached translations become stale and the TLB is flushed (second
   argument 0: global entries are not forcibly flushed). */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (!(env->cr[0] & CR0_PG_MASK))
        return; /* paging disabled: nothing cached depends on CR3 */
#if defined(DEBUG_MMU)
    printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
    tlb_flush(env, 0);
}
889

    
890
/* Install a new CR4 value: flush the TLB when a paging-related bit
   changes, sanitize OSFXSR against the advertised CPUID features and
   mirror it into the hidden flags. */
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    /* PGE/PAE/PSE affect translation: any change invalidates the TLB. */
    if (((new_cr4 ^ env->cr[4]) &
         (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) != 0) {
        tlb_flush(env, 1);
    }

    /* SSE handling: OSFXSR cannot be set on a CPU without SSE. */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;

    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}
909

    
910
#if defined(CONFIG_USER_ONLY)
911

    
912
/* User-mode-only emulation: there are no guest page tables, so every
   fault is reported back as a user-level page fault (#PF). */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    env->cr[2] = addr;
    /* Build the #PF error code: W bit from the access type, U bit
       because user-mode emulation always runs at CPL3. */
    env->error_code = ((is_write & 1) << PG_ERROR_W_BIT) | PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1; /* caller must raise the fault */
}
923

    
924
/* User-mode-only emulation: no MMU model, so virtual and physical
   addresses coincide. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
928

    
929
#else
930

    
931
/* XXX: This value should match the one returned by CPUID
932
 * and in exec.c */
933
#if defined(CONFIG_KQEMU)
934
#define PHYS_ADDR_MASK 0xfffff000LL
935
#else
936
# if defined(TARGET_X86_64)
937
# define PHYS_ADDR_MASK 0xfffffff000LL
938
# else
939
# define PHYS_ADDR_MASK 0xffffff000LL
940
# endif
941
#endif
942

    
943
/* return value:
944
   -1 = cannot handle fault
945
   0  = nothing more to do
946
   1  = generate PF fault
947
   2  = soft MMU activation required for this block
948
*/
949
/* Walk the guest page tables for 'addr' and, on success, install the
 * mapping in the QEMU software TLB.  Handles all four translation
 * modes: no paging, 32-bit 2-level, PAE 3-level and long-mode 4-level.
 *
 * is_write1: 0 = data read, 1 = data write, 2 = instruction fetch
 *            (the fetch case only matters for NX checking).
 * mmu_idx:   MMU_USER_IDX means a CPL3 access.
 *
 * return value (see comment above in the file):
 *    -1 = cannot handle fault
 *    0  = nothing more to do
 *    1  = generate PF fault
 *    2  = soft MMU activation required for this block
 */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    /* only the low bit matters for the R/W permission checks */
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: identity mapping with full permissions */
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            /* long mode: 4-level walk starting at the PML4 */
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension (non-canonical
               addresses raise #GP, not #PF) */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* NX set while EFER.NXE is clear is a reserved-bit fault */
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            /* ptep accumulates permissions across levels with '&'.
               NX is inverted (via '^') so that "executable" behaves
               like the other permissive bits and can be AND-combined. */
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* legacy PAE: 4-entry PDPT addressed by bits 31:30 */
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* the PDPT carries no permission bits: start fully open */
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            /* un-invert: after this, PG_NX_MASK set means not executable */
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                /* supervisor writes only honour R/W when CR0.WP is set */
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            /* update accessed/dirty bits in-place (notdirty store so
               the physical page is not marked dirty for code pages) */
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        /* legacy 32-bit, 2-level walk */
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    /* build the #PF error code: W/U bits plus I/D for NX faults */
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
1238

    
1239
/* Debugger-only virtual-to-physical translation.  Performs the same
 * page-table walk as cpu_x86_handle_mmu_fault() but without any
 * permission checks, accessed/dirty updates or TLB side effects.
 * Returns the physical address, or -1 if 'addr' is not mapped.
 */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            /* long mode: 4-level walk */
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            /* legacy PAE: 4-entry PDPT */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            /* paging disabled: identity mapping */
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page directory entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    /* recombine the offset bits within the (possibly large) page */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
1332

    
1333
/* Register the hardware debug register 'index' (DR0-DR3) with the
   generic breakpoint/watchpoint machinery, according to its type
   encoded in DR7.  On failure the slot reference is cleared. */
void hw_breakpoint_insert(CPUState *env, int index)
{
    int err = 0;
    int bp_type = hw_breakpoint_type(env->dr[7], index);

    if (bp_type == 0) {
        /* execution breakpoint */
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
    } else if (bp_type == 1 || bp_type == 3) {
        /* data write (1) or data read/write (3) watchpoint */
        int wp_flags = (bp_type == 1) ? (BP_CPU | BP_MEM_WRITE)
                                      : (BP_CPU | BP_MEM_ACCESS);
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    wp_flags, &env->cpu_watchpoint[index]);
    }
    /* bp_type == 2: no support for I/O watchpoints yet */
    if (err)
        env->cpu_breakpoint[index] = NULL;
}
1360

    
1361
/* Undo hw_breakpoint_insert() for debug register slot 'index'. */
void hw_breakpoint_remove(CPUState *env, int index)
{
    int bp_type;

    /* nothing registered for this slot */
    if (env->cpu_breakpoint[index] == NULL)
        return;

    bp_type = hw_breakpoint_type(env->dr[7], index);
    if (bp_type == 0) {
        /* execution breakpoint */
        if (hw_breakpoint_enabled(env->dr[7], index))
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
    } else if (bp_type == 1 || bp_type == 3) {
        /* data write / data access watchpoint */
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
    }
    /* bp_type == 2: no support for I/O watchpoints yet */
}
1379

    
1380
/* Evaluate the four hardware debug registers against the current CPU
   state and compute the DR6 status bits (B0-B3).  DR6 is written back
   when an enabled breakpoint matched, or when force_dr6_update is set.
   Returns nonzero iff an enabled breakpoint/watchpoint was hit. */
int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong dr6 = env->dr[6] & ~0xf; /* clear B0-B3 */
    int hit_enabled = 0;
    int reg;

    for (reg = 0; reg < 4; reg++) {
        int type = hw_breakpoint_type(env->dr[7], reg);
        int triggered;

        if (type == 0) {
            /* execution breakpoint: match on the current EIP */
            triggered = (env->dr[reg] == env->eip);
        } else if (type & 1) {
            /* data watchpoint: the memory core flags the hit */
            triggered = (env->cpu_watchpoint[reg] &&
                         (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT));
        } else {
            /* I/O breakpoints (type 2) are not supported */
            triggered = 0;
        }

        if (triggered) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg))
                hit_enabled = 1;
        }
    }

    if (hit_enabled || force_dr6_update)
        env->dr[6] = dr6;
    return hit_enabled;
}
1401

    
1402
static CPUDebugExcpHandler *prev_debug_excp_handler;
1403

    
1404
void raise_exception(int exception_index);
1405

    
1406
/* Debug-exception hook installed via cpu_set_debug_excp_handler().
 * Decides whether a watchpoint/breakpoint hit should be delivered to
 * the guest as a #DB exception or silently resumed, then chains to
 * the previously installed handler.
 * NOTE: raise_exception() and cpu_resume_from_signal() do not return. */
static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        /* a data watchpoint fired */
        if (env->watchpoint_hit->flags & BP_CPU) {
            /* guest (DR7) watchpoint: consume the hit and raise #DB
               only if an enabled hardware breakpoint really matched */
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception(EXCP01_DB);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        /* instruction breakpoint: look for one registered at EIP */
        TAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    /* guest breakpoint: update DR6 and deliver #DB */
                    check_hw_breakpoints(env, 1);
                    raise_exception(EXCP01_DB);
                }
                break;
            }
    }
    /* chain to the handler that was installed before ours */
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}
1431
#endif /* !CONFIG_USER_ONLY */
1432

    
1433
/* Execute the CPUID instruction on the host and return its outputs.
 * Only compiled to do real work with CONFIG_KVM; otherwise the output
 * pointers are left untouched (callers guard with kvm_enabled()).
 * Any of eax/ebx/ecx/edx may be NULL if the caller does not need it. */
static void host_cpuid(uint32_t function, uint32_t count,
                       uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#else
    /* 32-bit: save/restore all registers with pusha/popa so EBX
       (which may be reserved for PIC) is not clobbered from the
       compiler's point of view; results are stored through %2. */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
#endif
}
1467

    
1468
/* Emulate the CPUID instruction for the guest: fill eax/ebx/ecx/edx
 * for leaf 'index' (with sub-leaf 'count' where it applies) from the
 * configured CPU model in 'env'.  When KVM is enabled, some leaves
 * are filtered against the host's real CPUID. */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    /* test if maximum index reached: out-of-range leaves return the
       data of the highest supported basic leaf, as on real CPUs */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* vendor string and highest basic leaf */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;

        /* sysenter isn't supported on compatibility mode on AMD, and syscall
         * isn't supported in compatibility mode on Intel.  so advertise the
         * actual cpu, and say goodbye to migration between different vendors
         * if you use compatibility mode. */
        if (kvm_enabled())
            host_cpuid(0, 0, NULL, ebx, ecx, edx);
        break;
    case 1:
        /* family/model/stepping and feature flags */
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;

        /* "Hypervisor present" bit required for Microsoft SVVP */
        if (kvm_enabled())
            *ecx |= (1 << 31);
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        *eax = 1;
        *ebx = 0;
        *ecx = 0;
        *edx = 0x2c307d;
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        switch (count) {
            case 0: /* L1 dcache info */
                *eax = 0x0000121;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 1: /* L1 icache info */
                *eax = 0x0000122;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 2: /* L2 cache info */
                *eax = 0x0000143;
                *ebx = 0x3c0003f;
                *ecx = 0x0000fff;
                *edx = 0x0000001;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        /* highest extended leaf and vendor string */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        /* extended feature flags */
        *eax = env->cpuid_features;
        *ebx = 0;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        if (kvm_enabled()) {
            uint32_t h_eax, h_edx;

            host_cpuid(index, 0, &h_eax, NULL, NULL, &h_edx);

            /* disable CPU features that the host does not support */

            /* long mode */
            if ((h_edx & 0x20000000) == 0 /* || !lm_capable_kernel */)
                *edx &= ~0x20000000;
            /* syscall */
            if ((h_edx & 0x00000800) == 0)
                *edx &= ~0x00000800;
            /* nx */
            if ((h_edx & 0x00100000) == 0)
                *edx &= ~0x00100000;

            /* disable CPU features that KVM cannot support */

            /* svm */
            *ecx &= ~4UL;
            /* 3dnow */
            *edx &= ~0xc0000000;
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        *eax = 0x01ff01ff;
        *ebx = 0x01ff01ff;
        *ecx = 0x40020140;
        *edx = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        *eax = 0;
        *ebx = 0x42004200;
        *ecx = 0x02008140;
        *edx = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
/* XXX: This value must match the one used in the MMU code. */ 
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
#if defined(CONFIG_KQEMU)
            *eax = 0x00003020;        /* 48 bits virtual, 32 bits physical */
#else
/* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028;        /* 48 bits virtual, 40 bits physical */
#endif
        } else {
#if defined(CONFIG_KQEMU)
            *eax = 0x00000020;        /* 32 bits physical */
#else
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
#endif
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000000A:
        /* SVM capability leaf */
        *eax = 0x00000001; /* SVM Revision */
        *ebx = 0x00000010; /* nr of ASIDs */
        *ecx = 0;
        *edx = 0; /* optional features */
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
1668

    
1669
CPUX86State *cpu_x86_init(const char *cpu_model)
1670
{
1671
    CPUX86State *env;
1672
    static int inited;
1673

    
1674
    env = qemu_mallocz(sizeof(CPUX86State));
1675
    cpu_exec_init(env);
1676
    env->cpu_model_str = cpu_model;
1677

    
1678
    /* init various static tables */
1679
    if (!inited) {
1680
        inited = 1;
1681
        optimize_flags_init();
1682
#ifndef CONFIG_USER_ONLY
1683
        prev_debug_excp_handler =
1684
            cpu_set_debug_excp_handler(breakpoint_handler);
1685
#endif
1686
    }
1687
    if (cpu_x86_register(env, cpu_model) < 0) {
1688
        cpu_x86_close(env);
1689
        return NULL;
1690
    }
1691
    cpu_reset(env);
1692
#ifdef CONFIG_KQEMU
1693
    kqemu_init(env);
1694
#endif
1695
    if (kvm_enabled())
1696
        kvm_init_vcpu(env);
1697
    return env;
1698
}