target-i386/helper.c @ 6c1f42fe
/*
 * i386 helpers (without register variable usage)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "kvm.h"

//#define DEBUG_MMU

/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
 * about feature names, the Linux name is used. */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
static const char *ext_feature_name[] = {
    "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
    "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
    NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, "hypervisor",
};
static const char *ext2_feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
    "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
};
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};

static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    int i;
    int found = 0;

    for ( i = 0 ; i < 32 ; i++ )
        if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
            *features |= 1 << i;
            found = 1;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
            *ext_features |= 1 << i;
            found = 1;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
            *ext2_features |= 1 << i;
            found = 1;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
            *ext3_features |= 1 << i;
            found = 1;
        }
    if (!found) {
        fprintf(stderr, "CPU feature %s not found\n", flagname);
    }
}

typedef struct x86_def_t {
    const char *name;
    uint32_t level;
    uint32_t vendor1, vendor2, vendor3;
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;
    char model_id[48];
    int vendor_override;
} x86_def_t;

#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
static x86_def_t x86_defs[] = {
|
120 |
#ifdef TARGET_X86_64
|
121 |
{ |
122 |
.name = "qemu64",
|
123 |
.level = 2,
|
124 |
.vendor1 = CPUID_VENDOR_AMD_1, |
125 |
.vendor2 = CPUID_VENDOR_AMD_2, |
126 |
.vendor3 = CPUID_VENDOR_AMD_3, |
127 |
.family = 6,
|
128 |
.model = 2,
|
129 |
.stepping = 3,
|
130 |
.features = PPRO_FEATURES | |
131 |
/* these features are needed for Win64 and aren't fully implemented */
|
132 |
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | |
133 |
/* this feature is needed for Solaris and isn't fully implemented */
|
134 |
CPUID_PSE36, |
135 |
.ext_features = CPUID_EXT_SSE3, |
136 |
.ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
|
137 |
CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | |
138 |
CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT, |
139 |
.ext3_features = CPUID_EXT3_SVM, |
140 |
.xlevel = 0x8000000A,
|
141 |
.model_id = "QEMU Virtual CPU version " QEMU_VERSION,
|
142 |
}, |
143 |
{ |
144 |
.name = "phenom",
|
145 |
.level = 5,
|
146 |
.vendor1 = CPUID_VENDOR_AMD_1, |
147 |
.vendor2 = CPUID_VENDOR_AMD_2, |
148 |
.vendor3 = CPUID_VENDOR_AMD_3, |
149 |
.family = 16,
|
150 |
.model = 2,
|
151 |
.stepping = 3,
|
152 |
/* Missing: CPUID_VME, CPUID_HT */
|
153 |
.features = PPRO_FEATURES | |
154 |
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | |
155 |
CPUID_PSE36, |
156 |
/* Missing: CPUID_EXT_CX16, CPUID_EXT_POPCNT */
|
157 |
.ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR, |
158 |
/* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
|
159 |
.ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
|
160 |
CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | |
161 |
CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT | |
162 |
CPUID_EXT2_FFXSR, |
163 |
/* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
|
164 |
CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
|
165 |
CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
|
166 |
CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
|
167 |
.ext3_features = CPUID_EXT3_SVM, |
168 |
.xlevel = 0x8000001A,
|
169 |
.model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
|
170 |
}, |
171 |
{ |
172 |
.name = "core2duo",
|
173 |
.level = 10,
|
174 |
.family = 6,
|
175 |
.model = 15,
|
176 |
.stepping = 11,
|
177 |
/* The original CPU also implements these features:
|
178 |
CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
|
179 |
CPUID_TM, CPUID_PBE */
|
180 |
.features = PPRO_FEATURES | |
181 |
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | |
182 |
CPUID_PSE36, |
183 |
/* The original CPU also implements these ext features:
|
184 |
CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
|
185 |
CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
|
186 |
.ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3, |
187 |
.ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, |
188 |
/* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
|
189 |
.xlevel = 0x80000008,
|
190 |
.model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
|
191 |
}, |
192 |
#endif
|
193 |
{ |
194 |
.name = "qemu32",
|
195 |
.level = 2,
|
196 |
.family = 6,
|
197 |
.model = 3,
|
198 |
.stepping = 3,
|
199 |
.features = PPRO_FEATURES, |
200 |
.ext_features = CPUID_EXT_SSE3, |
201 |
.xlevel = 0,
|
202 |
.model_id = "QEMU Virtual CPU version " QEMU_VERSION,
|
203 |
}, |
204 |
{ |
205 |
.name = "coreduo",
|
206 |
.level = 10,
|
207 |
.family = 6,
|
208 |
.model = 14,
|
209 |
.stepping = 8,
|
210 |
/* The original CPU also implements these features:
|
211 |
CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
|
212 |
CPUID_TM, CPUID_PBE */
|
213 |
.features = PPRO_FEATURES | CPUID_VME | |
214 |
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA, |
215 |
/* The original CPU also implements these ext features:
|
216 |
CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
|
217 |
CPUID_EXT_PDCM */
|
218 |
.ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR, |
219 |
.ext2_features = CPUID_EXT2_NX, |
220 |
.xlevel = 0x80000008,
|
221 |
.model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
|
222 |
}, |
223 |
{ |
224 |
.name = "486",
|
225 |
.level = 0,
|
226 |
.family = 4,
|
227 |
.model = 0,
|
228 |
.stepping = 0,
|
229 |
.features = I486_FEATURES, |
230 |
.xlevel = 0,
|
231 |
}, |
232 |
{ |
233 |
.name = "pentium",
|
234 |
.level = 1,
|
235 |
.family = 5,
|
236 |
.model = 4,
|
237 |
.stepping = 3,
|
238 |
.features = PENTIUM_FEATURES, |
239 |
.xlevel = 0,
|
240 |
}, |
241 |
{ |
242 |
.name = "pentium2",
|
243 |
.level = 2,
|
244 |
.family = 6,
|
245 |
.model = 5,
|
246 |
.stepping = 2,
|
247 |
.features = PENTIUM2_FEATURES, |
248 |
.xlevel = 0,
|
249 |
}, |
250 |
{ |
251 |
.name = "pentium3",
|
252 |
.level = 2,
|
253 |
.family = 6,
|
254 |
.model = 7,
|
255 |
.stepping = 3,
|
256 |
.features = PENTIUM3_FEATURES, |
257 |
.xlevel = 0,
|
258 |
}, |
259 |
{ |
260 |
.name = "athlon",
|
261 |
.level = 2,
|
262 |
.vendor1 = CPUID_VENDOR_AMD_1, |
263 |
.vendor2 = CPUID_VENDOR_AMD_2, |
264 |
.vendor3 = CPUID_VENDOR_AMD_3, |
265 |
.family = 6,
|
266 |
.model = 2,
|
267 |
.stepping = 3,
|
268 |
.features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA, |
269 |
.ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
|
270 |
.xlevel = 0x80000008,
|
271 |
/* XXX: put another string ? */
|
272 |
.model_id = "QEMU Virtual CPU version " QEMU_VERSION,
|
273 |
}, |
274 |
{ |
275 |
.name = "n270",
|
276 |
/* original is on level 10 */
|
277 |
.level = 5,
|
278 |
.family = 6,
|
279 |
.model = 28,
|
280 |
.stepping = 2,
|
281 |
.features = PPRO_FEATURES | |
282 |
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME, |
283 |
/* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
|
284 |
* CPUID_HT | CPUID_TM | CPUID_PBE */
|
285 |
/* Some CPUs got no CPUID_SEP */
|
286 |
.ext_features = CPUID_EXT_MONITOR | |
287 |
CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
|
288 |
/* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
|
289 |
* CPUID_EXT_TM2 | CPUID_EXT_XTPR */
|
290 |
.ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
|
291 |
/* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
|
292 |
.xlevel = 0x8000000A,
|
293 |
.model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
|
294 |
}, |
295 |
}; |
296 |
|
static void host_cpuid(uint32_t function, uint32_t count, uint32_t *eax,
                       uint32_t *ebx, uint32_t *ecx, uint32_t *edx);

static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax, ebx, ecx, edx;
    int i;

    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        memcpy(str + i * 16 + 0, &eax, 4);
        memcpy(str + i * 16 + 4, &ebx, 4);
        memcpy(str + i * 16 + 8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}

static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
{
    uint32_t eax, ebx, ecx, edx;

    x86_cpu_def->name = "host";
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->level = eax;
    x86_cpu_def->vendor1 = ebx;
    x86_cpu_def->vendor2 = edx;
    x86_cpu_def->vendor3 = ecx;

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    x86_cpu_def->stepping = eax & 0x0F;
    x86_cpu_def->ext_features = ecx;
    x86_cpu_def->features = edx;

    host_cpuid(0x80000000, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->xlevel = eax;

    host_cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->ext2_features = edx;
    x86_cpu_def->ext3_features = ecx;
    cpu_x86_fill_model_id(x86_cpu_def->model_id);
    x86_cpu_def->vendor_override = 0;

    return 0;
}

static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model) |
346 |
{ |
347 |
unsigned int i; |
348 |
x86_def_t *def; |
349 |
|
350 |
char *s = strdup(cpu_model);
|
351 |
char *featurestr, *name = strtok(s, ","); |
352 |
uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0; |
353 |
uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0; |
354 |
int family = -1, model = -1, stepping = -1; |
355 |
|
356 |
def = NULL;
|
357 |
for (i = 0; i < ARRAY_SIZE(x86_defs); i++) { |
358 |
if (strcmp(name, x86_defs[i].name) == 0) { |
359 |
def = &x86_defs[i]; |
360 |
break;
|
361 |
} |
362 |
} |
363 |
if (!def) {
|
364 |
if (strcmp(name, "host") != 0) { |
365 |
goto error;
|
366 |
} |
367 |
cpu_x86_fill_host(x86_cpu_def); |
368 |
} else {
|
369 |
memcpy(x86_cpu_def, def, sizeof(*def));
|
370 |
} |
371 |
|
372 |
add_flagname_to_bitmaps("hypervisor", &plus_features,
|
373 |
&plus_ext_features, &plus_ext2_features, &plus_ext3_features); |
374 |
|
375 |
featurestr = strtok(NULL, ","); |
376 |
|
377 |
while (featurestr) {
|
378 |
char *val;
|
379 |
if (featurestr[0] == '+') { |
380 |
add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
|
381 |
} else if (featurestr[0] == '-') { |
382 |
add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
|
383 |
} else if ((val = strchr(featurestr, '='))) { |
384 |
*val = 0; val++;
|
385 |
if (!strcmp(featurestr, "family")) { |
386 |
char *err;
|
387 |
family = strtol(val, &err, 10);
|
388 |
if (!*val || *err || family < 0) { |
389 |
fprintf(stderr, "bad numerical value %s\n", val);
|
390 |
goto error;
|
391 |
} |
392 |
x86_cpu_def->family = family; |
393 |
} else if (!strcmp(featurestr, "model")) { |
394 |
char *err;
|
395 |
model = strtol(val, &err, 10);
|
396 |
if (!*val || *err || model < 0 || model > 0xff) { |
397 |
fprintf(stderr, "bad numerical value %s\n", val);
|
398 |
goto error;
|
399 |
} |
400 |
x86_cpu_def->model = model; |
401 |
} else if (!strcmp(featurestr, "stepping")) { |
402 |
char *err;
|
403 |
stepping = strtol(val, &err, 10);
|
404 |
if (!*val || *err || stepping < 0 || stepping > 0xf) { |
405 |
fprintf(stderr, "bad numerical value %s\n", val);
|
406 |
goto error;
|
407 |
} |
408 |
x86_cpu_def->stepping = stepping; |
409 |
} else if (!strcmp(featurestr, "vendor")) { |
410 |
if (strlen(val) != 12) { |
411 |
fprintf(stderr, "vendor string must be 12 chars long\n");
|
412 |
goto error;
|
413 |
} |
414 |
x86_cpu_def->vendor1 = 0;
|
415 |
x86_cpu_def->vendor2 = 0;
|
416 |
x86_cpu_def->vendor3 = 0;
|
417 |
for(i = 0; i < 4; i++) { |
418 |
x86_cpu_def->vendor1 |= ((uint8_t)val[i ]) << (8 * i);
|
419 |
x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i); |
420 |
x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i); |
421 |
} |
422 |
x86_cpu_def->vendor_override = 1;
|
423 |
} else if (!strcmp(featurestr, "model_id")) { |
424 |
pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
|
425 |
val); |
426 |
} else {
|
427 |
fprintf(stderr, "unrecognized feature %s\n", featurestr);
|
428 |
goto error;
|
429 |
} |
430 |
} else {
|
431 |
fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
|
432 |
goto error;
|
433 |
} |
434 |
featurestr = strtok(NULL, ","); |
435 |
} |
436 |
x86_cpu_def->features |= plus_features; |
437 |
x86_cpu_def->ext_features |= plus_ext_features; |
438 |
x86_cpu_def->ext2_features |= plus_ext2_features; |
439 |
x86_cpu_def->ext3_features |= plus_ext3_features; |
440 |
x86_cpu_def->features &= ~minus_features; |
441 |
x86_cpu_def->ext_features &= ~minus_ext_features; |
442 |
x86_cpu_def->ext2_features &= ~minus_ext2_features; |
443 |
x86_cpu_def->ext3_features &= ~minus_ext3_features; |
444 |
free(s); |
445 |
return 0; |
446 |
|
447 |
error:
|
448 |
free(s); |
449 |
return -1; |
450 |
} |
451 |
|
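/*
 * Illustrative example of the cpu_model syntax parsed by
 * cpu_x86_find_by_name() above (the model and feature names come from the
 * tables in this file; the exact command line shown is hypothetical):
 *
 *     -cpu qemu64,+pni,-nx,family=6,model=2,vendor=AuthenticAMD
 *
 * The first token selects an x86_defs[] entry (or "host"), "+name"/"-name"
 * set or clear a feature bit, and "key=value" overrides family, model,
 * stepping, vendor or model_id.
 */
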
void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
        (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
}

static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_vendor_override = def->vendor_override;
    env->cpuid_level = def->level;
    if (def->family > 0x0f)
        env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
    else
        env->cpuid_version = def->family << 8;
    env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
    env->cpuid_version |= def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
    {
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
    return 0;
}

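/*
 * Worked example of the cpuid_version packing done in cpu_x86_register()
 * above: the "phenom" definition has family 16, model 2, stepping 3.  Since
 * family > 0x0f, the base family field is set to 0xf and the excess
 * (16 - 15 = 1) goes into the extended family field, so cpuid_version ends
 * up as 0xf00 | (1 << 20) | (2 << 4) | 3 = 0x100f23.
 */
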
/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
}

void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

static void
cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
                       int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                           ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                    [(sc->flags & DESC_TYPE_MASK)
                                     >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}

void cpu_dump_state(CPUState *env, FILE *f,
|
697 |
int (*cpu_fprintf)(FILE *f, const char *fmt, ...), |
698 |
int flags)
|
699 |
{ |
700 |
int eflags, i, nb;
|
701 |
char cc_op_name[32]; |
702 |
static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" }; |
703 |
|
704 |
if (kvm_enabled())
|
705 |
kvm_arch_get_registers(env); |
706 |
|
707 |
eflags = env->eflags; |
708 |
#ifdef TARGET_X86_64
|
709 |
if (env->hflags & HF_CS64_MASK) {
|
710 |
cpu_fprintf(f, |
711 |
"RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n" |
712 |
"RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n" |
713 |
"R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n" |
714 |
"R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n" |
715 |
"RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n", |
716 |
env->regs[R_EAX], |
717 |
env->regs[R_EBX], |
718 |
env->regs[R_ECX], |
719 |
env->regs[R_EDX], |
720 |
env->regs[R_ESI], |
721 |
env->regs[R_EDI], |
722 |
env->regs[R_EBP], |
723 |
env->regs[R_ESP], |
724 |
env->regs[8],
|
725 |
env->regs[9],
|
726 |
env->regs[10],
|
727 |
env->regs[11],
|
728 |
env->regs[12],
|
729 |
env->regs[13],
|
730 |
env->regs[14],
|
731 |
env->regs[15],
|
732 |
env->eip, eflags, |
733 |
eflags & DF_MASK ? 'D' : '-', |
734 |
eflags & CC_O ? 'O' : '-', |
735 |
eflags & CC_S ? 'S' : '-', |
736 |
eflags & CC_Z ? 'Z' : '-', |
737 |
eflags & CC_A ? 'A' : '-', |
738 |
eflags & CC_P ? 'P' : '-', |
739 |
eflags & CC_C ? 'C' : '-', |
740 |
env->hflags & HF_CPL_MASK, |
741 |
(env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
|
742 |
(int)(env->a20_mask >> 20) & 1, |
743 |
(env->hflags >> HF_SMM_SHIFT) & 1,
|
744 |
env->halted); |
745 |
} else
|
746 |
#endif
|
747 |
{ |
748 |
cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
|
749 |
"ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
|
750 |
"EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
|
751 |
(uint32_t)env->regs[R_EAX], |
752 |
(uint32_t)env->regs[R_EBX], |
753 |
(uint32_t)env->regs[R_ECX], |
754 |
(uint32_t)env->regs[R_EDX], |
755 |
(uint32_t)env->regs[R_ESI], |
756 |
(uint32_t)env->regs[R_EDI], |
757 |
(uint32_t)env->regs[R_EBP], |
758 |
(uint32_t)env->regs[R_ESP], |
759 |
(uint32_t)env->eip, eflags, |
760 |
eflags & DF_MASK ? 'D' : '-', |
761 |
eflags & CC_O ? 'O' : '-', |
762 |
eflags & CC_S ? 'S' : '-', |
763 |
eflags & CC_Z ? 'Z' : '-', |
764 |
eflags & CC_A ? 'A' : '-', |
765 |
eflags & CC_P ? 'P' : '-', |
766 |
eflags & CC_C ? 'C' : '-', |
767 |
env->hflags & HF_CPL_MASK, |
768 |
(env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
|
769 |
(int)(env->a20_mask >> 20) & 1, |
770 |
(env->hflags >> HF_SMM_SHIFT) & 1,
|
771 |
env->halted); |
772 |
} |
773 |
|
774 |
for(i = 0; i < 6; i++) { |
775 |
cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i], |
776 |
&env->segs[i]); |
777 |
} |
778 |
cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
|
779 |
cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);
|
780 |
|
781 |
#ifdef TARGET_X86_64
|
782 |
if (env->hflags & HF_LMA_MASK) {
|
783 |
cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n", |
784 |
env->gdt.base, env->gdt.limit); |
785 |
cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n", |
786 |
env->idt.base, env->idt.limit); |
787 |
cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n", |
788 |
(uint32_t)env->cr[0],
|
789 |
env->cr[2],
|
790 |
env->cr[3],
|
791 |
(uint32_t)env->cr[4]);
|
792 |
for(i = 0; i < 4; i++) |
793 |
cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]); |
794 |
cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n", |
795 |
env->dr[6], env->dr[7]); |
796 |
} else
|
797 |
#endif
|
798 |
{ |
799 |
cpu_fprintf(f, "GDT= %08x %08x\n",
|
800 |
(uint32_t)env->gdt.base, env->gdt.limit); |
801 |
cpu_fprintf(f, "IDT= %08x %08x\n",
|
802 |
(uint32_t)env->idt.base, env->idt.limit); |
803 |
cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
|
804 |
(uint32_t)env->cr[0],
|
805 |
(uint32_t)env->cr[2],
|
806 |
(uint32_t)env->cr[3],
|
807 |
(uint32_t)env->cr[4]);
|
808 |
for(i = 0; i < 4; i++) |
809 |
cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]);
|
810 |
cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->dr[7]); |
811 |
} |
812 |
if (flags & X86_DUMP_CCOP) {
|
813 |
if ((unsigned)env->cc_op < CC_OP_NB) |
814 |
snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]); |
815 |
else
|
816 |
snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op); |
817 |
#ifdef TARGET_X86_64
|
818 |
if (env->hflags & HF_CS64_MASK) {
|
819 |
cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n", |
820 |
env->cc_src, env->cc_dst, |
821 |
cc_op_name); |
822 |
} else
|
823 |
#endif
|
824 |
{ |
825 |
cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
|
826 |
(uint32_t)env->cc_src, (uint32_t)env->cc_dst, |
827 |
cc_op_name); |
828 |
} |
829 |
} |
830 |
if (flags & X86_DUMP_FPU) {
|
831 |
int fptag;
|
832 |
fptag = 0;
|
833 |
for(i = 0; i < 8; i++) { |
834 |
fptag |= ((!env->fptags[i]) << i); |
835 |
} |
836 |
cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
|
837 |
env->fpuc, |
838 |
(env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11, |
839 |
env->fpstt, |
840 |
fptag, |
841 |
env->mxcsr); |
842 |
for(i=0;i<8;i++) { |
843 |
#if defined(USE_X86LDOUBLE)
|
844 |
union {
|
845 |
long double d; |
846 |
struct {
|
847 |
uint64_t lower; |
848 |
uint16_t upper; |
849 |
} l; |
850 |
} tmp; |
851 |
tmp.d = env->fpregs[i].d; |
852 |
cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x", |
853 |
i, tmp.l.lower, tmp.l.upper); |
854 |
#else
|
855 |
cpu_fprintf(f, "FPR%d=%016" PRIx64,
|
856 |
i, env->fpregs[i].mmx.q); |
857 |
#endif
|
858 |
if ((i & 1) == 1) |
859 |
cpu_fprintf(f, "\n");
|
860 |
else
|
861 |
cpu_fprintf(f, " ");
|
862 |
} |
863 |
if (env->hflags & HF_CS64_MASK)
|
864 |
nb = 16;
|
865 |
else
|
866 |
nb = 8;
|
867 |
for(i=0;i<nb;i++) { |
868 |
cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
|
869 |
i, |
870 |
env->xmm_regs[i].XMM_L(3),
|
871 |
env->xmm_regs[i].XMM_L(2),
|
872 |
env->xmm_regs[i].XMM_L(1),
|
873 |
env->xmm_regs[i].XMM_L(0));
|
874 |
if ((i & 1) == 1) |
875 |
cpu_fprintf(f, "\n");
|
876 |
else
|
877 |
cpu_fprintf(f, " ");
|
878 |
} |
879 |
} |
880 |
} |
881 |
|
/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = (~0x100000) | (a20_state << 20);
    }
}

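/*
 * Background note for cpu_x86_set_a20() above: with the A20 gate disabled,
 * env->a20_mask clears bit 20, so bit 20 of every guest physical address is
 * forced to zero, reproducing the 8086-style wrap-around at 1 MB that some
 * real-mode software relies on.  With A20 enabled the mask is all ones and
 * addresses pass through unchanged.
 */
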
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}

#if defined(CONFIG_USER_ONLY)
|
979 |
|
980 |
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
|
981 |
int is_write, int mmu_idx, int is_softmmu) |
982 |
{ |
983 |
/* user mode only emulation */
|
984 |
is_write &= 1;
|
985 |
env->cr[2] = addr;
|
986 |
env->error_code = (is_write << PG_ERROR_W_BIT); |
987 |
env->error_code |= PG_ERROR_U_MASK; |
988 |
env->exception_index = EXCP0E_PAGE; |
989 |
return 1; |
990 |
} |
991 |
|
992 |
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr) |
993 |
{ |
994 |
return addr;
|
995 |
} |
996 |
|
997 |
#else
|
998 |
|
999 |
/* XXX: This value should match the one returned by CPUID
|
1000 |
* and in exec.c */
|
1001 |
#if defined(CONFIG_KQEMU)
|
1002 |
#define PHYS_ADDR_MASK 0xfffff000LL |
1003 |
#else
|
1004 |
# if defined(TARGET_X86_64)
|
1005 |
# define PHYS_ADDR_MASK 0xfffffff000LL |
1006 |
# else
|
1007 |
# define PHYS_ADDR_MASK 0xffffff000LL |
1008 |
# endif
|
1009 |
#endif
|
1010 |
|
1011 |
/* return value:
|
1012 |
-1 = cannot handle fault
|
1013 |
0 = nothing more to do
|
1014 |
1 = generate PF fault
|
1015 |
2 = soft MMU activation required for this block
|
1016 |
*/
|
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

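/*
 * Orientation for the page walks in cpu_x86_handle_mmu_fault() above and
 * cpu_get_phys_page_debug() below: with PAE/long mode enabled, the 64-bit
 * paging structures are indexed by 9-bit fields of the virtual address
 * (PML4 from bits 47..39, PDPT from 38..30, PD from 29..21, PT from 20..12),
 * while legacy 32-bit paging uses two 10-bit fields (PD from 31..22, PT from
 * 21..12).  The "(addr >> N) & 0x1ff" and "(addr >> N) & 0xffc" expressions
 * extract exactly these indices, scaled to byte offsets into the tables.
 */
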
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page directory entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}

void hw_breakpoint_insert(CPUState *env, int index)
{
    int type, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    case 3:
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    if (err)
        env->cpu_breakpoint[index] = NULL;
}

void hw_breakpoint_remove(CPUState *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        break;
    case 1:
    case 3:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
}

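/*
 * Note on the switch cases in the hw_breakpoint helpers above (and the type
 * checks in check_hw_breakpoints() below): hw_breakpoint_type() returns the
 * 2-bit R/W field of DR7 for the given debug register, which x86 defines as
 * 0 = instruction execution, 1 = data write, 2 = I/O access (not supported
 * here), 3 = data read or write.  That is why case 0 maps to a breakpoint
 * and cases 1/3 map to memory watchpoints.
 */
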
int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong dr6;
    int reg, type;
    int hit_enabled = 0;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < 4; reg++) {
        type = hw_breakpoint_type(env->dr[7], reg);
        if ((type == 0 && env->dr[reg] == env->eip) ||
            ((type & 1) && env->cpu_watchpoint[reg] &&
             (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg))
                hit_enabled = 1;
        }
    }
    if (hit_enabled || force_dr6_update)
        env->dr[6] = dr6;
    return hit_enabled;
}

static CPUDebugExcpHandler *prev_debug_excp_handler;

void raise_exception(int exception_index);

static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception(EXCP01_DB);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        TAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception(EXCP01_DB);
                }
                break;
            }
    }
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}
#endif /* !CONFIG_USER_ONLY */

static void host_cpuid(uint32_t function, uint32_t count,
                       uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#else
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
#endif
}

void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;

        /* sysenter isn't supported in compatibility mode on AMD, and syscall
         * isn't supported in compatibility mode on Intel, so advertise the
         * actual cpu, and say goodbye to migration between different vendors
         * if you use compatibility mode. */
        if (kvm_enabled() && !env->cpuid_vendor_override)
            host_cpuid(0, 0, NULL, ebx, ecx, edx);
        break;
    case 1:
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        *eax = 1;
        *ebx = 0;
        *ecx = 0;
        *edx = 0x2c307d;
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        switch (count) {
            case 0: /* L1 dcache info */
                *eax = 0x0000121;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 1: /* L1 icache info */
                *eax = 0x0000122;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 2: /* L2 cache info */
                *eax = 0x0000143;
                *ebx = 0x3c0003f;
                *ecx = 0x0000fff;
                *edx = 0x0000001;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        if (kvm_enabled()) {
            uint32_t h_eax, h_edx;

            host_cpuid(index, 0, &h_eax, NULL, NULL, &h_edx);

            /* disable CPU features that the host does not support */

            /* long mode */
            if ((h_edx & 0x20000000) == 0 /* || !lm_capable_kernel */)
                *edx &= ~0x20000000;
            /* syscall */
            if ((h_edx & 0x00000800) == 0)
                *edx &= ~0x00000800;
            /* nx */
            if ((h_edx & 0x00100000) == 0)
                *edx &= ~0x00100000;

            /* disable CPU features that KVM cannot support */

            /* svm */
            *ecx &= ~4UL;
            /* 3dnow */
            *edx &= ~0xc0000000;
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        *eax = 0x01ff01ff;
        *ebx = 0x01ff01ff;
        *ecx = 0x40020140;
        *edx = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        *eax = 0;
        *ebx = 0x42004200;
        *ecx = 0x02008140;
        *edx = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
#if defined(CONFIG_KQEMU)
            *eax = 0x00003020;      /* 48 bits virtual, 32 bits physical */
#else
            /* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028;      /* 48 bits virtual, 40 bits physical */
#endif
        } else {
#if defined(CONFIG_KQEMU)
            *eax = 0x00000020;      /* 32 bits physical */
#else
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
#endif
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000000A:
        *eax = 0x00000001; /* SVM Revision */
        *ebx = 0x00000010; /* nr of ASIDs */
        *ecx = 0;
        *edx = 0; /* optional features */
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}

CPUX86State *cpu_x86_init(const char *cpu_model) |
1734 |
{ |
1735 |
CPUX86State *env; |
1736 |
static int inited; |
1737 |
|
1738 |
env = qemu_mallocz(sizeof(CPUX86State));
|
1739 |
cpu_exec_init(env); |
1740 |
env->cpu_model_str = cpu_model; |
1741 |
|
1742 |
/* init various static tables */
|
1743 |
if (!inited) {
|
1744 |
inited = 1;
|
1745 |
optimize_flags_init(); |
1746 |
#ifndef CONFIG_USER_ONLY
|
1747 |
prev_debug_excp_handler = |
1748 |
cpu_set_debug_excp_handler(breakpoint_handler); |
1749 |
#endif
|
1750 |
} |
1751 |
if (cpu_x86_register(env, cpu_model) < 0) { |
1752 |
cpu_x86_close(env); |
1753 |
return NULL; |
1754 |
} |
1755 |
cpu_reset(env); |
1756 |
#ifdef CONFIG_KQEMU
|
1757 |
kqemu_init(env); |
1758 |
#endif
|
1759 |
|
1760 |
qemu_init_vcpu(env); |
1761 |
|
1762 |
return env;
|
1763 |
} |
1764 |
|
1765 |
#if !defined(CONFIG_USER_ONLY)
|
1766 |
void do_cpu_init(CPUState *env)
|
1767 |
{ |
1768 |
int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
|
1769 |
cpu_reset(env); |
1770 |
env->interrupt_request = sipi; |
1771 |
apic_init_reset(env); |
1772 |
} |
1773 |
|
1774 |
void do_cpu_sipi(CPUState *env)
|
1775 |
{ |
1776 |
apic_sipi(env); |
1777 |
} |
1778 |
#else
|
1779 |
void do_cpu_init(CPUState *env)
|
1780 |
{ |
1781 |
} |
1782 |
void do_cpu_sipi(CPUState *env)
|
1783 |
{ |
1784 |
} |
1785 |
#endif
|