Revision f7b2429f
b/target-i386/Makefile.objs | ||
---|---|---|
1 | 1 |
obj-y += translate.o op_helper.o helper.o cpu.o |
2 | 2 |
obj-y += excp_helper.o fpu_helper.o cc_helper.o int_helper.o svm_helper.o |
3 |
obj-y += smm_helper.o |
|
3 |
obj-y += smm_helper.o misc_helper.o
|
|
4 | 4 |
obj-$(CONFIG_SOFTMMU) += machine.o arch_memory_mapping.o arch_dump.o |
5 | 5 |
obj-$(CONFIG_KVM) += kvm.o hyperv.o |
6 | 6 |
obj-$(CONFIG_LINUX_USER) += ioport-user.o |
... | ... | |
12 | 12 |
$(obj)/int_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS) |
13 | 13 |
$(obj)/svm_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS) |
14 | 14 |
$(obj)/smm_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS) |
15 |
$(obj)/misc_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS) |
b/target-i386/misc_helper.c | ||
---|---|---|
1 |
/* |
|
2 |
* x86 misc helpers |
|
3 |
* |
|
4 |
* Copyright (c) 2003 Fabrice Bellard |
|
5 |
* |
|
6 |
* This library is free software; you can redistribute it and/or |
|
7 |
* modify it under the terms of the GNU Lesser General Public |
|
8 |
* License as published by the Free Software Foundation; either |
|
9 |
* version 2 of the License, or (at your option) any later version. |
|
10 |
* |
|
11 |
* This library is distributed in the hope that it will be useful, |
|
12 |
* but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
13 |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
|
14 |
* Lesser General Public License for more details. |
|
15 |
* |
|
16 |
* You should have received a copy of the GNU Lesser General Public |
|
17 |
* License along with this library; if not, see <http://www.gnu.org/licenses/>. |
|
18 |
*/ |
|
19 |
|
|
20 |
#include "cpu.h" |
|
21 |
#include "dyngen-exec.h" |
|
22 |
#include "ioport.h" |
|
23 |
#include "helper.h" |
|
24 |
|
|
25 |
#if !defined(CONFIG_USER_ONLY) |
|
26 |
#include "softmmu_exec.h" |
|
27 |
#endif /* !defined(CONFIG_USER_ONLY) */ |
|
28 |
|
|
29 |
/* check if Port I/O is allowed in TSS */ |
|
30 |
static inline void check_io(int addr, int size) |
|
31 |
{ |
|
32 |
int io_offset, val, mask; |
|
33 |
|
|
34 |
/* TSS must be a valid 32 bit one */ |
|
35 |
if (!(env->tr.flags & DESC_P_MASK) || |
|
36 |
((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 || |
|
37 |
env->tr.limit < 103) { |
|
38 |
goto fail; |
|
39 |
} |
|
40 |
io_offset = lduw_kernel(env->tr.base + 0x66); |
|
41 |
io_offset += (addr >> 3); |
|
42 |
/* Note: the check needs two bytes */ |
|
43 |
if ((io_offset + 1) > env->tr.limit) { |
|
44 |
goto fail; |
|
45 |
} |
|
46 |
val = lduw_kernel(env->tr.base + io_offset); |
|
47 |
val >>= (addr & 7); |
|
48 |
mask = (1 << size) - 1; |
|
49 |
/* all bits must be zero to allow the I/O */ |
|
50 |
if ((val & mask) != 0) { |
|
51 |
fail: |
|
52 |
raise_exception_err(env, EXCP0D_GPF, 0); |
|
53 |
} |
|
54 |
} |
|
55 |
|
|
56 |
/* TSS permission check for a 1-byte I/O access at port t0. */
void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}
|
60 |
|
|
61 |
/* TSS permission check for a 2-byte I/O access at port t0. */
void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}
|
65 |
|
|
66 |
/* TSS permission check for a 4-byte I/O access at port t0. */
void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}
|
70 |
|
|
71 |
/* OUT imm8/DX, AL: write the low byte of 'data' to an I/O port. */
void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}
|
75 |
|
|
76 |
/* IN AL, imm8/DX: read one byte from an I/O port. */
target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}
|
80 |
|
|
81 |
/* OUT imm8/DX, AX: write the low 16 bits of 'data' to an I/O port. */
void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}
|
85 |
|
|
86 |
/* IN AX, imm8/DX: read 16 bits from an I/O port. */
target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}
|
90 |
|
|
91 |
/* OUT imm8/DX, EAX: write 32 bits to an I/O port. */
void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}
|
95 |
|
|
96 |
/* IN EAX, imm8/DX: read 32 bits from an I/O port. */
target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}
|
100 |
|
|
101 |
void helper_into(int next_eip_addend) |
|
102 |
{ |
|
103 |
int eflags; |
|
104 |
|
|
105 |
eflags = helper_cc_compute_all(CC_OP); |
|
106 |
if (eflags & CC_O) { |
|
107 |
raise_interrupt(env, EXCP04_INTO, 1, 0, next_eip_addend); |
|
108 |
} |
|
109 |
} |
|
110 |
|
|
111 |
void helper_single_step(void) |
|
112 |
{ |
|
113 |
#ifndef CONFIG_USER_ONLY |
|
114 |
check_hw_breakpoints(env, 1); |
|
115 |
env->dr[6] |= DR6_BS; |
|
116 |
#endif |
|
117 |
raise_exception(env, EXCP01_DB); |
|
118 |
} |
|
119 |
|
|
120 |
void helper_cpuid(void) |
|
121 |
{ |
|
122 |
uint32_t eax, ebx, ecx, edx; |
|
123 |
|
|
124 |
cpu_svm_check_intercept_param(env, SVM_EXIT_CPUID, 0); |
|
125 |
|
|
126 |
cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx); |
|
127 |
EAX = eax; |
|
128 |
EBX = ebx; |
|
129 |
ECX = ecx; |
|
130 |
EDX = edx; |
|
131 |
} |
|
132 |
|
|
133 |
#if defined(CONFIG_USER_ONLY) |
|
134 |
target_ulong helper_read_crN(int reg) |
|
135 |
{ |
|
136 |
return 0; |
|
137 |
} |
|
138 |
|
|
139 |
void helper_write_crN(int reg, target_ulong t0) |
|
140 |
{ |
|
141 |
} |
|
142 |
|
|
143 |
void helper_movl_drN_T0(int reg, target_ulong t0) |
|
144 |
{ |
|
145 |
} |
|
146 |
#else |
|
147 |
target_ulong helper_read_crN(int reg) |
|
148 |
{ |
|
149 |
target_ulong val; |
|
150 |
|
|
151 |
cpu_svm_check_intercept_param(env, SVM_EXIT_READ_CR0 + reg, 0); |
|
152 |
switch (reg) { |
|
153 |
default: |
|
154 |
val = env->cr[reg]; |
|
155 |
break; |
|
156 |
case 8: |
|
157 |
if (!(env->hflags2 & HF2_VINTR_MASK)) { |
|
158 |
val = cpu_get_apic_tpr(env->apic_state); |
|
159 |
} else { |
|
160 |
val = env->v_tpr; |
|
161 |
} |
|
162 |
break; |
|
163 |
} |
|
164 |
return val; |
|
165 |
} |
|
166 |
|
|
167 |
void helper_write_crN(int reg, target_ulong t0) |
|
168 |
{ |
|
169 |
cpu_svm_check_intercept_param(env, SVM_EXIT_WRITE_CR0 + reg, 0); |
|
170 |
switch (reg) { |
|
171 |
case 0: |
|
172 |
cpu_x86_update_cr0(env, t0); |
|
173 |
break; |
|
174 |
case 3: |
|
175 |
cpu_x86_update_cr3(env, t0); |
|
176 |
break; |
|
177 |
case 4: |
|
178 |
cpu_x86_update_cr4(env, t0); |
|
179 |
break; |
|
180 |
case 8: |
|
181 |
if (!(env->hflags2 & HF2_VINTR_MASK)) { |
|
182 |
cpu_set_apic_tpr(env->apic_state, t0); |
|
183 |
} |
|
184 |
env->v_tpr = t0 & 0x0f; |
|
185 |
break; |
|
186 |
default: |
|
187 |
env->cr[reg] = t0; |
|
188 |
break; |
|
189 |
} |
|
190 |
} |
|
191 |
|
|
192 |
void helper_movl_drN_T0(int reg, target_ulong t0) |
|
193 |
{ |
|
194 |
int i; |
|
195 |
|
|
196 |
if (reg < 4) { |
|
197 |
hw_breakpoint_remove(env, reg); |
|
198 |
env->dr[reg] = t0; |
|
199 |
hw_breakpoint_insert(env, reg); |
|
200 |
} else if (reg == 7) { |
|
201 |
for (i = 0; i < 4; i++) { |
|
202 |
hw_breakpoint_remove(env, i); |
|
203 |
} |
|
204 |
env->dr[7] = t0; |
|
205 |
for (i = 0; i < 4; i++) { |
|
206 |
hw_breakpoint_insert(env, i); |
|
207 |
} |
|
208 |
} else { |
|
209 |
env->dr[reg] = t0; |
|
210 |
} |
|
211 |
} |
|
212 |
#endif |
|
213 |
|
|
214 |
/* LMSW: load the machine status word into CR0. */
void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}
|
221 |
|
|
222 |
/* INVLPG: flush the TLB entry covering 'addr' (SVM may intercept). */
void helper_invlpg(target_ulong addr)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}
|
227 |
|
|
228 |
void helper_rdtsc(void) |
|
229 |
{ |
|
230 |
uint64_t val; |
|
231 |
|
|
232 |
if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) { |
|
233 |
raise_exception(env, EXCP0D_GPF); |
|
234 |
} |
|
235 |
cpu_svm_check_intercept_param(env, SVM_EXIT_RDTSC, 0); |
|
236 |
|
|
237 |
val = cpu_get_tsc(env) + env->tsc_offset; |
|
238 |
EAX = (uint32_t)(val); |
|
239 |
EDX = (uint32_t)(val >> 32); |
|
240 |
} |
|
241 |
|
|
242 |
void helper_rdtscp(void) |
|
243 |
{ |
|
244 |
helper_rdtsc(); |
|
245 |
ECX = (uint32_t)(env->tsc_aux); |
|
246 |
} |
|
247 |
|
|
248 |
void helper_rdpmc(void) |
|
249 |
{ |
|
250 |
if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) { |
|
251 |
raise_exception(env, EXCP0D_GPF); |
|
252 |
} |
|
253 |
cpu_svm_check_intercept_param(env, SVM_EXIT_RDPMC, 0); |
|
254 |
|
|
255 |
/* currently unimplemented */ |
|
256 |
qemu_log_mask(LOG_UNIMP, "x86: unimplemented rdpmc\n"); |
|
257 |
raise_exception_err(env, EXCP06_ILLOP, 0); |
|
258 |
} |
|
259 |
|
|
260 |
#if defined(CONFIG_USER_ONLY) |
|
261 |
void helper_wrmsr(void) |
|
262 |
{ |
|
263 |
} |
|
264 |
|
|
265 |
void helper_rdmsr(void) |
|
266 |
{ |
|
267 |
} |
|
268 |
#else |
|
269 |
void helper_wrmsr(void) |
|
270 |
{ |
|
271 |
uint64_t val; |
|
272 |
|
|
273 |
cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1); |
|
274 |
|
|
275 |
val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32); |
|
276 |
|
|
277 |
switch ((uint32_t)ECX) { |
|
278 |
case MSR_IA32_SYSENTER_CS: |
|
279 |
env->sysenter_cs = val & 0xffff; |
|
280 |
break; |
|
281 |
case MSR_IA32_SYSENTER_ESP: |
|
282 |
env->sysenter_esp = val; |
|
283 |
break; |
|
284 |
case MSR_IA32_SYSENTER_EIP: |
|
285 |
env->sysenter_eip = val; |
|
286 |
break; |
|
287 |
case MSR_IA32_APICBASE: |
|
288 |
cpu_set_apic_base(env->apic_state, val); |
|
289 |
break; |
|
290 |
case MSR_EFER: |
|
291 |
{ |
|
292 |
uint64_t update_mask; |
|
293 |
|
|
294 |
update_mask = 0; |
|
295 |
if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL) { |
|
296 |
update_mask |= MSR_EFER_SCE; |
|
297 |
} |
|
298 |
if (env->cpuid_ext2_features & CPUID_EXT2_LM) { |
|
299 |
update_mask |= MSR_EFER_LME; |
|
300 |
} |
|
301 |
if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR) { |
|
302 |
update_mask |= MSR_EFER_FFXSR; |
|
303 |
} |
|
304 |
if (env->cpuid_ext2_features & CPUID_EXT2_NX) { |
|
305 |
update_mask |= MSR_EFER_NXE; |
|
306 |
} |
|
307 |
if (env->cpuid_ext3_features & CPUID_EXT3_SVM) { |
|
308 |
update_mask |= MSR_EFER_SVME; |
|
309 |
} |
|
310 |
if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR) { |
|
311 |
update_mask |= MSR_EFER_FFXSR; |
|
312 |
} |
|
313 |
cpu_load_efer(env, (env->efer & ~update_mask) | |
|
314 |
(val & update_mask)); |
|
315 |
} |
|
316 |
break; |
|
317 |
case MSR_STAR: |
|
318 |
env->star = val; |
|
319 |
break; |
|
320 |
case MSR_PAT: |
|
321 |
env->pat = val; |
|
322 |
break; |
|
323 |
case MSR_VM_HSAVE_PA: |
|
324 |
env->vm_hsave = val; |
|
325 |
break; |
|
326 |
#ifdef TARGET_X86_64 |
|
327 |
case MSR_LSTAR: |
|
328 |
env->lstar = val; |
|
329 |
break; |
|
330 |
case MSR_CSTAR: |
|
331 |
env->cstar = val; |
|
332 |
break; |
|
333 |
case MSR_FMASK: |
|
334 |
env->fmask = val; |
|
335 |
break; |
|
336 |
case MSR_FSBASE: |
|
337 |
env->segs[R_FS].base = val; |
|
338 |
break; |
|
339 |
case MSR_GSBASE: |
|
340 |
env->segs[R_GS].base = val; |
|
341 |
break; |
|
342 |
case MSR_KERNELGSBASE: |
|
343 |
env->kernelgsbase = val; |
|
344 |
break; |
|
345 |
#endif |
|
346 |
case MSR_MTRRphysBase(0): |
|
347 |
case MSR_MTRRphysBase(1): |
|
348 |
case MSR_MTRRphysBase(2): |
|
349 |
case MSR_MTRRphysBase(3): |
|
350 |
case MSR_MTRRphysBase(4): |
|
351 |
case MSR_MTRRphysBase(5): |
|
352 |
case MSR_MTRRphysBase(6): |
|
353 |
case MSR_MTRRphysBase(7): |
|
354 |
env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val; |
|
355 |
break; |
|
356 |
case MSR_MTRRphysMask(0): |
|
357 |
case MSR_MTRRphysMask(1): |
|
358 |
case MSR_MTRRphysMask(2): |
|
359 |
case MSR_MTRRphysMask(3): |
|
360 |
case MSR_MTRRphysMask(4): |
|
361 |
case MSR_MTRRphysMask(5): |
|
362 |
case MSR_MTRRphysMask(6): |
|
363 |
case MSR_MTRRphysMask(7): |
|
364 |
env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val; |
|
365 |
break; |
|
366 |
case MSR_MTRRfix64K_00000: |
|
367 |
env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val; |
|
368 |
break; |
|
369 |
case MSR_MTRRfix16K_80000: |
|
370 |
case MSR_MTRRfix16K_A0000: |
|
371 |
env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val; |
|
372 |
break; |
|
373 |
case MSR_MTRRfix4K_C0000: |
|
374 |
case MSR_MTRRfix4K_C8000: |
|
375 |
case MSR_MTRRfix4K_D0000: |
|
376 |
case MSR_MTRRfix4K_D8000: |
|
377 |
case MSR_MTRRfix4K_E0000: |
|
378 |
case MSR_MTRRfix4K_E8000: |
|
379 |
case MSR_MTRRfix4K_F0000: |
|
380 |
case MSR_MTRRfix4K_F8000: |
|
381 |
env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val; |
|
382 |
break; |
|
383 |
case MSR_MTRRdefType: |
|
384 |
env->mtrr_deftype = val; |
|
385 |
break; |
|
386 |
case MSR_MCG_STATUS: |
|
387 |
env->mcg_status = val; |
|
388 |
break; |
|
389 |
case MSR_MCG_CTL: |
|
390 |
if ((env->mcg_cap & MCG_CTL_P) |
|
391 |
&& (val == 0 || val == ~(uint64_t)0)) { |
|
392 |
env->mcg_ctl = val; |
|
393 |
} |
|
394 |
break; |
|
395 |
case MSR_TSC_AUX: |
|
396 |
env->tsc_aux = val; |
|
397 |
break; |
|
398 |
case MSR_IA32_MISC_ENABLE: |
|
399 |
env->msr_ia32_misc_enable = val; |
|
400 |
break; |
|
401 |
default: |
|
402 |
if ((uint32_t)ECX >= MSR_MC0_CTL |
|
403 |
&& (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) { |
|
404 |
uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL; |
|
405 |
if ((offset & 0x3) != 0 |
|
406 |
|| (val == 0 || val == ~(uint64_t)0)) { |
|
407 |
env->mce_banks[offset] = val; |
|
408 |
} |
|
409 |
break; |
|
410 |
} |
|
411 |
/* XXX: exception? */ |
|
412 |
break; |
|
413 |
} |
|
414 |
} |
|
415 |
|
|
416 |
void helper_rdmsr(void) |
|
417 |
{ |
|
418 |
uint64_t val; |
|
419 |
|
|
420 |
cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0); |
|
421 |
|
|
422 |
switch ((uint32_t)ECX) { |
|
423 |
case MSR_IA32_SYSENTER_CS: |
|
424 |
val = env->sysenter_cs; |
|
425 |
break; |
|
426 |
case MSR_IA32_SYSENTER_ESP: |
|
427 |
val = env->sysenter_esp; |
|
428 |
break; |
|
429 |
case MSR_IA32_SYSENTER_EIP: |
|
430 |
val = env->sysenter_eip; |
|
431 |
break; |
|
432 |
case MSR_IA32_APICBASE: |
|
433 |
val = cpu_get_apic_base(env->apic_state); |
|
434 |
break; |
|
435 |
case MSR_EFER: |
|
436 |
val = env->efer; |
|
437 |
break; |
|
438 |
case MSR_STAR: |
|
439 |
val = env->star; |
|
440 |
break; |
|
441 |
case MSR_PAT: |
|
442 |
val = env->pat; |
|
443 |
break; |
|
444 |
case MSR_VM_HSAVE_PA: |
|
445 |
val = env->vm_hsave; |
|
446 |
break; |
|
447 |
case MSR_IA32_PERF_STATUS: |
|
448 |
/* tsc_increment_by_tick */ |
|
449 |
val = 1000ULL; |
|
450 |
/* CPU multiplier */ |
|
451 |
val |= (((uint64_t)4ULL) << 40); |
|
452 |
break; |
|
453 |
#ifdef TARGET_X86_64 |
|
454 |
case MSR_LSTAR: |
|
455 |
val = env->lstar; |
|
456 |
break; |
|
457 |
case MSR_CSTAR: |
|
458 |
val = env->cstar; |
|
459 |
break; |
|
460 |
case MSR_FMASK: |
|
461 |
val = env->fmask; |
|
462 |
break; |
|
463 |
case MSR_FSBASE: |
|
464 |
val = env->segs[R_FS].base; |
|
465 |
break; |
|
466 |
case MSR_GSBASE: |
|
467 |
val = env->segs[R_GS].base; |
|
468 |
break; |
|
469 |
case MSR_KERNELGSBASE: |
|
470 |
val = env->kernelgsbase; |
|
471 |
break; |
|
472 |
case MSR_TSC_AUX: |
|
473 |
val = env->tsc_aux; |
|
474 |
break; |
|
475 |
#endif |
|
476 |
case MSR_MTRRphysBase(0): |
|
477 |
case MSR_MTRRphysBase(1): |
|
478 |
case MSR_MTRRphysBase(2): |
|
479 |
case MSR_MTRRphysBase(3): |
|
480 |
case MSR_MTRRphysBase(4): |
|
481 |
case MSR_MTRRphysBase(5): |
|
482 |
case MSR_MTRRphysBase(6): |
|
483 |
case MSR_MTRRphysBase(7): |
|
484 |
val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base; |
|
485 |
break; |
|
486 |
case MSR_MTRRphysMask(0): |
|
487 |
case MSR_MTRRphysMask(1): |
|
488 |
case MSR_MTRRphysMask(2): |
|
489 |
case MSR_MTRRphysMask(3): |
|
490 |
case MSR_MTRRphysMask(4): |
|
491 |
case MSR_MTRRphysMask(5): |
|
492 |
case MSR_MTRRphysMask(6): |
|
493 |
case MSR_MTRRphysMask(7): |
|
494 |
val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask; |
|
495 |
break; |
|
496 |
case MSR_MTRRfix64K_00000: |
|
497 |
val = env->mtrr_fixed[0]; |
|
498 |
break; |
|
499 |
case MSR_MTRRfix16K_80000: |
|
500 |
case MSR_MTRRfix16K_A0000: |
|
501 |
val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1]; |
|
502 |
break; |
|
503 |
case MSR_MTRRfix4K_C0000: |
|
504 |
case MSR_MTRRfix4K_C8000: |
|
505 |
case MSR_MTRRfix4K_D0000: |
|
506 |
case MSR_MTRRfix4K_D8000: |
|
507 |
case MSR_MTRRfix4K_E0000: |
|
508 |
case MSR_MTRRfix4K_E8000: |
|
509 |
case MSR_MTRRfix4K_F0000: |
|
510 |
case MSR_MTRRfix4K_F8000: |
|
511 |
val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3]; |
|
512 |
break; |
|
513 |
case MSR_MTRRdefType: |
|
514 |
val = env->mtrr_deftype; |
|
515 |
break; |
|
516 |
case MSR_MTRRcap: |
|
517 |
if (env->cpuid_features & CPUID_MTRR) { |
|
518 |
val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | |
|
519 |
MSR_MTRRcap_WC_SUPPORTED; |
|
520 |
} else { |
|
521 |
/* XXX: exception? */ |
|
522 |
val = 0; |
|
523 |
} |
|
524 |
break; |
|
525 |
case MSR_MCG_CAP: |
|
526 |
val = env->mcg_cap; |
|
527 |
break; |
|
528 |
case MSR_MCG_CTL: |
|
529 |
if (env->mcg_cap & MCG_CTL_P) { |
|
530 |
val = env->mcg_ctl; |
|
531 |
} else { |
|
532 |
val = 0; |
|
533 |
} |
|
534 |
break; |
|
535 |
case MSR_MCG_STATUS: |
|
536 |
val = env->mcg_status; |
|
537 |
break; |
|
538 |
case MSR_IA32_MISC_ENABLE: |
|
539 |
val = env->msr_ia32_misc_enable; |
|
540 |
break; |
|
541 |
default: |
|
542 |
if ((uint32_t)ECX >= MSR_MC0_CTL |
|
543 |
&& (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) { |
|
544 |
uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL; |
|
545 |
val = env->mce_banks[offset]; |
|
546 |
break; |
|
547 |
} |
|
548 |
/* XXX: exception? */ |
|
549 |
val = 0; |
|
550 |
break; |
|
551 |
} |
|
552 |
EAX = (uint32_t)(val); |
|
553 |
EDX = (uint32_t)(val >> 32); |
|
554 |
} |
|
555 |
#endif |
|
556 |
|
|
557 |
static void do_hlt(void) |
|
558 |
{ |
|
559 |
env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */ |
|
560 |
env->halted = 1; |
|
561 |
env->exception_index = EXCP_HLT; |
|
562 |
cpu_loop_exit(env); |
|
563 |
} |
|
564 |
|
|
565 |
void helper_hlt(int next_eip_addend) |
|
566 |
{ |
|
567 |
cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0); |
|
568 |
EIP += next_eip_addend; |
|
569 |
|
|
570 |
do_hlt(); |
|
571 |
} |
|
572 |
|
|
573 |
/* MONITOR: only the ECX==0 form is accepted; the monitored address is
 * not recorded (see XXX).  */
void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0) {
        raise_exception(env, EXCP0D_GPF);
    }
    /* XXX: store address? */
    cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0);
}
|
581 |
|
|
582 |
void helper_mwait(int next_eip_addend) |
|
583 |
{ |
|
584 |
if ((uint32_t)ECX != 0) { |
|
585 |
raise_exception(env, EXCP0D_GPF); |
|
586 |
} |
|
587 |
cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0); |
|
588 |
EIP += next_eip_addend; |
|
589 |
|
|
590 |
/* XXX: not complete but not completely erroneous */ |
|
591 |
if (env->cpu_index != 0 || env->next_cpu != NULL) { |
|
592 |
/* more than one CPU: do not sleep because another CPU may |
|
593 |
wake this one */ |
|
594 |
} else { |
|
595 |
do_hlt(); |
|
596 |
} |
|
597 |
} |
|
598 |
|
|
599 |
void helper_debug(void) |
|
600 |
{ |
|
601 |
env->exception_index = EXCP_DEBUG; |
|
602 |
cpu_loop_exit(env); |
|
603 |
} |
b/target-i386/op_helper.c | ||
---|---|---|
19 | 19 |
|
20 | 20 |
#include "cpu.h" |
21 | 21 |
#include "dyngen-exec.h" |
22 |
#include "ioport.h" |
|
23 | 22 |
#include "qemu-log.h" |
24 |
#include "cpu-defs.h" |
|
25 | 23 |
#include "helper.h" |
26 | 24 |
|
27 | 25 |
#if !defined(CONFIG_USER_ONLY) |
... | ... | |
489 | 487 |
#endif |
490 | 488 |
} |
491 | 489 |
|
492 |
/* check if Port I/O is allowed in TSS */ |
|
493 |
static inline void check_io(int addr, int size) |
|
494 |
{ |
|
495 |
int io_offset, val, mask; |
|
496 |
|
|
497 |
/* TSS must be a valid 32 bit one */ |
|
498 |
if (!(env->tr.flags & DESC_P_MASK) || |
|
499 |
((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 || |
|
500 |
env->tr.limit < 103) { |
|
501 |
goto fail; |
|
502 |
} |
|
503 |
io_offset = lduw_kernel(env->tr.base + 0x66); |
|
504 |
io_offset += (addr >> 3); |
|
505 |
/* Note: the check needs two bytes */ |
|
506 |
if ((io_offset + 1) > env->tr.limit) { |
|
507 |
goto fail; |
|
508 |
} |
|
509 |
val = lduw_kernel(env->tr.base + io_offset); |
|
510 |
val >>= (addr & 7); |
|
511 |
mask = (1 << size) - 1; |
|
512 |
/* all bits must be zero to allow the I/O */ |
|
513 |
if ((val & mask) != 0) { |
|
514 |
fail: |
|
515 |
raise_exception_err(env, EXCP0D_GPF, 0); |
|
516 |
} |
|
517 |
} |
|
518 |
|
|
519 |
void helper_check_iob(uint32_t t0) |
|
520 |
{ |
|
521 |
check_io(t0, 1); |
|
522 |
} |
|
523 |
|
|
524 |
void helper_check_iow(uint32_t t0) |
|
525 |
{ |
|
526 |
check_io(t0, 2); |
|
527 |
} |
|
528 |
|
|
529 |
void helper_check_iol(uint32_t t0) |
|
530 |
{ |
|
531 |
check_io(t0, 4); |
|
532 |
} |
|
533 |
|
|
534 |
void helper_outb(uint32_t port, uint32_t data) |
|
535 |
{ |
|
536 |
cpu_outb(port, data & 0xff); |
|
537 |
} |
|
538 |
|
|
539 |
target_ulong helper_inb(uint32_t port) |
|
540 |
{ |
|
541 |
return cpu_inb(port); |
|
542 |
} |
|
543 |
|
|
544 |
void helper_outw(uint32_t port, uint32_t data) |
|
545 |
{ |
|
546 |
cpu_outw(port, data & 0xffff); |
|
547 |
} |
|
548 |
|
|
549 |
target_ulong helper_inw(uint32_t port) |
|
550 |
{ |
|
551 |
return cpu_inw(port); |
|
552 |
} |
|
553 |
|
|
554 |
void helper_outl(uint32_t port, uint32_t data) |
|
555 |
{ |
|
556 |
cpu_outl(port, data); |
|
557 |
} |
|
558 |
|
|
559 |
target_ulong helper_inl(uint32_t port) |
|
560 |
{ |
|
561 |
return cpu_inl(port); |
|
562 |
} |
|
563 |
|
|
564 | 490 |
static inline unsigned int get_sp_mask(unsigned int e2) |
565 | 491 |
{ |
566 | 492 |
if (e2 & DESC_B_MASK) { |
... | ... | |
1353 | 1279 |
env = saved_env; |
1354 | 1280 |
} |
1355 | 1281 |
|
1356 |
void helper_into(int next_eip_addend) |
|
1357 |
{ |
|
1358 |
int eflags; |
|
1359 |
|
|
1360 |
eflags = helper_cc_compute_all(CC_OP); |
|
1361 |
if (eflags & CC_O) { |
|
1362 |
raise_interrupt(env, EXCP04_INTO, 1, 0, next_eip_addend); |
|
1363 |
} |
|
1364 |
} |
|
1365 |
|
|
1366 | 1282 |
void helper_cmpxchg8b(target_ulong a0) |
1367 | 1283 |
{ |
1368 | 1284 |
uint64_t d; |
... | ... | |
1411 | 1327 |
} |
1412 | 1328 |
#endif |
1413 | 1329 |
|
1414 |
void helper_single_step(void) |
|
1415 |
{ |
|
1416 |
#ifndef CONFIG_USER_ONLY |
|
1417 |
check_hw_breakpoints(env, 1); |
|
1418 |
env->dr[6] |= DR6_BS; |
|
1419 |
#endif |
|
1420 |
raise_exception(env, EXCP01_DB); |
|
1421 |
} |
|
1422 |
|
|
1423 |
void helper_cpuid(void) |
|
1424 |
{ |
|
1425 |
uint32_t eax, ebx, ecx, edx; |
|
1426 |
|
|
1427 |
cpu_svm_check_intercept_param(env, SVM_EXIT_CPUID, 0); |
|
1428 |
|
|
1429 |
cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx); |
|
1430 |
EAX = eax; |
|
1431 |
EBX = ebx; |
|
1432 |
ECX = ecx; |
|
1433 |
EDX = edx; |
|
1434 |
} |
|
1435 |
|
|
1436 | 1330 |
void helper_enter_level(int level, int data32, target_ulong t1) |
1437 | 1331 |
{ |
1438 | 1332 |
target_ulong ssp; |
... | ... | |
2454 | 2348 |
EIP = EDX; |
2455 | 2349 |
} |
2456 | 2350 |
|
2457 |
#if defined(CONFIG_USER_ONLY) |
|
2458 |
target_ulong helper_read_crN(int reg) |
|
2459 |
{ |
|
2460 |
return 0; |
|
2461 |
} |
|
2462 |
|
|
2463 |
void helper_write_crN(int reg, target_ulong t0) |
|
2464 |
{ |
|
2465 |
} |
|
2466 |
|
|
2467 |
void helper_movl_drN_T0(int reg, target_ulong t0) |
|
2468 |
{ |
|
2469 |
} |
|
2470 |
#else |
|
2471 |
target_ulong helper_read_crN(int reg) |
|
2472 |
{ |
|
2473 |
target_ulong val; |
|
2474 |
|
|
2475 |
cpu_svm_check_intercept_param(env, SVM_EXIT_READ_CR0 + reg, 0); |
|
2476 |
switch (reg) { |
|
2477 |
default: |
|
2478 |
val = env->cr[reg]; |
|
2479 |
break; |
|
2480 |
case 8: |
|
2481 |
if (!(env->hflags2 & HF2_VINTR_MASK)) { |
|
2482 |
val = cpu_get_apic_tpr(env->apic_state); |
|
2483 |
} else { |
|
2484 |
val = env->v_tpr; |
|
2485 |
} |
|
2486 |
break; |
|
2487 |
} |
|
2488 |
return val; |
|
2489 |
} |
|
2490 |
|
|
2491 |
void helper_write_crN(int reg, target_ulong t0) |
|
2492 |
{ |
|
2493 |
cpu_svm_check_intercept_param(env, SVM_EXIT_WRITE_CR0 + reg, 0); |
|
2494 |
switch (reg) { |
|
2495 |
case 0: |
|
2496 |
cpu_x86_update_cr0(env, t0); |
|
2497 |
break; |
|
2498 |
case 3: |
|
2499 |
cpu_x86_update_cr3(env, t0); |
|
2500 |
break; |
|
2501 |
case 4: |
|
2502 |
cpu_x86_update_cr4(env, t0); |
|
2503 |
break; |
|
2504 |
case 8: |
|
2505 |
if (!(env->hflags2 & HF2_VINTR_MASK)) { |
|
2506 |
cpu_set_apic_tpr(env->apic_state, t0); |
|
2507 |
} |
|
2508 |
env->v_tpr = t0 & 0x0f; |
|
2509 |
break; |
|
2510 |
default: |
|
2511 |
env->cr[reg] = t0; |
|
2512 |
break; |
|
2513 |
} |
|
2514 |
} |
|
2515 |
|
|
2516 |
void helper_movl_drN_T0(int reg, target_ulong t0) |
|
2517 |
{ |
|
2518 |
int i; |
|
2519 |
|
|
2520 |
if (reg < 4) { |
|
2521 |
hw_breakpoint_remove(env, reg); |
|
2522 |
env->dr[reg] = t0; |
|
2523 |
hw_breakpoint_insert(env, reg); |
|
2524 |
} else if (reg == 7) { |
|
2525 |
for (i = 0; i < 4; i++) { |
|
2526 |
hw_breakpoint_remove(env, i); |
|
2527 |
} |
|
2528 |
env->dr[7] = t0; |
|
2529 |
for (i = 0; i < 4; i++) { |
|
2530 |
hw_breakpoint_insert(env, i); |
|
2531 |
} |
|
2532 |
} else { |
|
2533 |
env->dr[reg] = t0; |
|
2534 |
} |
|
2535 |
} |
|
2536 |
#endif |
|
2537 |
|
|
2538 |
void helper_lmsw(target_ulong t0) |
|
2539 |
{ |
|
2540 |
/* only 4 lower bits of CR0 are modified. PE cannot be set to zero |
|
2541 |
if already set to one. */ |
|
2542 |
t0 = (env->cr[0] & ~0xe) | (t0 & 0xf); |
|
2543 |
helper_write_crN(0, t0); |
|
2544 |
} |
|
2545 |
|
|
2546 |
void helper_invlpg(target_ulong addr) |
|
2547 |
{ |
|
2548 |
cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPG, 0); |
|
2549 |
tlb_flush_page(env, addr); |
|
2550 |
} |
|
2551 |
|
|
2552 |
void helper_rdtsc(void) |
|
2553 |
{ |
|
2554 |
uint64_t val; |
|
2555 |
|
|
2556 |
if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) { |
|
2557 |
raise_exception(env, EXCP0D_GPF); |
|
2558 |
} |
|
2559 |
cpu_svm_check_intercept_param(env, SVM_EXIT_RDTSC, 0); |
|
2560 |
|
|
2561 |
val = cpu_get_tsc(env) + env->tsc_offset; |
|
2562 |
EAX = (uint32_t)(val); |
|
2563 |
EDX = (uint32_t)(val >> 32); |
|
2564 |
} |
|
2565 |
|
|
2566 |
void helper_rdtscp(void) |
|
2567 |
{ |
|
2568 |
helper_rdtsc(); |
|
2569 |
ECX = (uint32_t)(env->tsc_aux); |
|
2570 |
} |
|
2571 |
|
|
2572 |
void helper_rdpmc(void) |
|
2573 |
{ |
|
2574 |
if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) { |
|
2575 |
raise_exception(env, EXCP0D_GPF); |
|
2576 |
} |
|
2577 |
cpu_svm_check_intercept_param(env, SVM_EXIT_RDPMC, 0); |
|
2578 |
|
|
2579 |
/* currently unimplemented */ |
|
2580 |
qemu_log_mask(LOG_UNIMP, "x86: unimplemented rdpmc\n"); |
|
2581 |
raise_exception_err(env, EXCP06_ILLOP, 0); |
|
2582 |
} |
|
2583 |
|
|
2584 |
#if defined(CONFIG_USER_ONLY) |
|
2585 |
void helper_wrmsr(void) |
|
2586 |
{ |
|
2587 |
} |
|
2588 |
|
|
2589 |
void helper_rdmsr(void) |
|
2590 |
{ |
|
2591 |
} |
|
2592 |
#else |
|
2593 |
void helper_wrmsr(void) |
|
2594 |
{ |
|
2595 |
uint64_t val; |
|
2596 |
|
|
2597 |
cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1); |
|
2598 |
|
|
2599 |
val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32); |
|
2600 |
|
|
2601 |
switch ((uint32_t)ECX) { |
|
2602 |
case MSR_IA32_SYSENTER_CS: |
|
2603 |
env->sysenter_cs = val & 0xffff; |
|
2604 |
break; |
|
2605 |
case MSR_IA32_SYSENTER_ESP: |
|
2606 |
env->sysenter_esp = val; |
|
2607 |
break; |
|
2608 |
case MSR_IA32_SYSENTER_EIP: |
|
2609 |
env->sysenter_eip = val; |
|
2610 |
break; |
|
2611 |
case MSR_IA32_APICBASE: |
|
2612 |
cpu_set_apic_base(env->apic_state, val); |
|
2613 |
break; |
|
2614 |
case MSR_EFER: |
|
2615 |
{ |
|
2616 |
uint64_t update_mask; |
|
2617 |
|
|
2618 |
update_mask = 0; |
|
2619 |
if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL) { |
|
2620 |
update_mask |= MSR_EFER_SCE; |
|
2621 |
} |
|
2622 |
if (env->cpuid_ext2_features & CPUID_EXT2_LM) { |
|
2623 |
update_mask |= MSR_EFER_LME; |
|
2624 |
} |
|
2625 |
if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR) { |
|
2626 |
update_mask |= MSR_EFER_FFXSR; |
|
2627 |
} |
|
2628 |
if (env->cpuid_ext2_features & CPUID_EXT2_NX) { |
|
2629 |
update_mask |= MSR_EFER_NXE; |
|
2630 |
} |
|
2631 |
if (env->cpuid_ext3_features & CPUID_EXT3_SVM) { |
|
2632 |
update_mask |= MSR_EFER_SVME; |
|
2633 |
} |
|
2634 |
if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR) { |
|
2635 |
update_mask |= MSR_EFER_FFXSR; |
|
2636 |
} |
|
2637 |
cpu_load_efer(env, (env->efer & ~update_mask) | |
|
2638 |
(val & update_mask)); |
|
2639 |
} |
|
2640 |
break; |
|
2641 |
case MSR_STAR: |
|
2642 |
env->star = val; |
|
2643 |
break; |
|
2644 |
case MSR_PAT: |
|
2645 |
env->pat = val; |
|
2646 |
break; |
|
2647 |
case MSR_VM_HSAVE_PA: |
|
2648 |
env->vm_hsave = val; |
|
2649 |
break; |
|
2650 |
#ifdef TARGET_X86_64 |
|
2651 |
case MSR_LSTAR: |
|
2652 |
env->lstar = val; |
|
2653 |
break; |
|
2654 |
case MSR_CSTAR: |
|
2655 |
env->cstar = val; |
|
2656 |
break; |
|
2657 |
case MSR_FMASK: |
|
2658 |
env->fmask = val; |
|
2659 |
break; |
|
2660 |
case MSR_FSBASE: |
|
2661 |
env->segs[R_FS].base = val; |
|
2662 |
break; |
|
2663 |
case MSR_GSBASE: |
|
2664 |
env->segs[R_GS].base = val; |
|
2665 |
break; |
|
2666 |
case MSR_KERNELGSBASE: |
|
2667 |
env->kernelgsbase = val; |
|
2668 |
break; |
|
2669 |
#endif |
|
2670 |
case MSR_MTRRphysBase(0): |
|
2671 |
case MSR_MTRRphysBase(1): |
|
2672 |
case MSR_MTRRphysBase(2): |
|
2673 |
case MSR_MTRRphysBase(3): |
|
2674 |
case MSR_MTRRphysBase(4): |
|
2675 |
case MSR_MTRRphysBase(5): |
|
2676 |
case MSR_MTRRphysBase(6): |
|
2677 |
case MSR_MTRRphysBase(7): |
|
2678 |
env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val; |
|
2679 |
break; |
|
2680 |
case MSR_MTRRphysMask(0): |
|
2681 |
case MSR_MTRRphysMask(1): |
|
2682 |
case MSR_MTRRphysMask(2): |
|
2683 |
case MSR_MTRRphysMask(3): |
|
2684 |
case MSR_MTRRphysMask(4): |
|
2685 |
case MSR_MTRRphysMask(5): |
|
2686 |
case MSR_MTRRphysMask(6): |
|
2687 |
case MSR_MTRRphysMask(7): |
|
2688 |
env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val; |
|
2689 |
break; |
|
2690 |
case MSR_MTRRfix64K_00000: |
|
2691 |
env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val; |
|
2692 |
break; |
|
2693 |
case MSR_MTRRfix16K_80000: |
|
2694 |
case MSR_MTRRfix16K_A0000: |
|
2695 |
env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val; |
|
2696 |
break; |
|
2697 |
case MSR_MTRRfix4K_C0000: |
|
2698 |
case MSR_MTRRfix4K_C8000: |
|
2699 |
case MSR_MTRRfix4K_D0000: |
|
2700 |
case MSR_MTRRfix4K_D8000: |
|
2701 |
case MSR_MTRRfix4K_E0000: |
|
2702 |
case MSR_MTRRfix4K_E8000: |
|
2703 |
case MSR_MTRRfix4K_F0000: |
|
2704 |
case MSR_MTRRfix4K_F8000: |
|
2705 |
env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val; |
|
2706 |
break; |
|
2707 |
case MSR_MTRRdefType: |
|
2708 |
env->mtrr_deftype = val; |
|
2709 |
break; |
|
2710 |
case MSR_MCG_STATUS: |
|
2711 |
env->mcg_status = val; |
|
2712 |
break; |
|
2713 |
case MSR_MCG_CTL: |
|
2714 |
if ((env->mcg_cap & MCG_CTL_P) |
|
2715 |
&& (val == 0 || val == ~(uint64_t)0)) { |
|
2716 |
env->mcg_ctl = val; |
|
2717 |
} |
|
2718 |
break; |
|
2719 |
case MSR_TSC_AUX: |
|
2720 |
env->tsc_aux = val; |
|
2721 |
break; |
|
2722 |
case MSR_IA32_MISC_ENABLE: |
|
2723 |
env->msr_ia32_misc_enable = val; |
|
2724 |
break; |
|
2725 |
default: |
|
2726 |
if ((uint32_t)ECX >= MSR_MC0_CTL |
|
2727 |
&& (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) { |
|
2728 |
uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL; |
|
2729 |
if ((offset & 0x3) != 0 |
|
2730 |
|| (val == 0 || val == ~(uint64_t)0)) { |
|
2731 |
env->mce_banks[offset] = val; |
|
2732 |
} |
|
2733 |
break; |
|
2734 |
} |
|
2735 |
/* XXX: exception? */ |
|
2736 |
break; |
|
2737 |
} |
|
2738 |
} |
|
2739 |
|
|
2740 |
void helper_rdmsr(void) |
|
2741 |
{ |
|
2742 |
uint64_t val; |
|
2743 |
|
|
2744 |
cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0); |
|
2745 |
|
|
2746 |
switch ((uint32_t)ECX) { |
|
2747 |
case MSR_IA32_SYSENTER_CS: |
|
2748 |
val = env->sysenter_cs; |
|
2749 |
break; |
|
2750 |
case MSR_IA32_SYSENTER_ESP: |
|
2751 |
val = env->sysenter_esp; |
|
2752 |
break; |
|
2753 |
case MSR_IA32_SYSENTER_EIP: |
|
2754 |
val = env->sysenter_eip; |
|
2755 |
break; |
|
2756 |
case MSR_IA32_APICBASE: |
|
2757 |
val = cpu_get_apic_base(env->apic_state); |
|
2758 |
break; |
|
2759 |
case MSR_EFER: |
|
2760 |
val = env->efer; |
|
2761 |
break; |
|
2762 |
case MSR_STAR: |
|
2763 |
val = env->star; |
|
2764 |
break; |
|
2765 |
case MSR_PAT: |
|
2766 |
val = env->pat; |
|
2767 |
break; |
|
2768 |
case MSR_VM_HSAVE_PA: |
|
2769 |
val = env->vm_hsave; |
|
2770 |
break; |
|
2771 |
case MSR_IA32_PERF_STATUS: |
|
2772 |
/* tsc_increment_by_tick */ |
|
2773 |
val = 1000ULL; |
|
2774 |
/* CPU multiplier */ |
|
2775 |
val |= (((uint64_t)4ULL) << 40); |
|
2776 |
break; |
|
2777 |
#ifdef TARGET_X86_64 |
|
2778 |
case MSR_LSTAR: |
|
2779 |
val = env->lstar; |
|
2780 |
break; |
|
2781 |
case MSR_CSTAR: |
|
2782 |
val = env->cstar; |
|
2783 |
break; |
|
2784 |
case MSR_FMASK: |
|
2785 |
val = env->fmask; |
|
2786 |
break; |
|
2787 |
case MSR_FSBASE: |
|
2788 |
val = env->segs[R_FS].base; |
|
2789 |
break; |
|
2790 |
case MSR_GSBASE: |
|
2791 |
val = env->segs[R_GS].base; |
|
2792 |
break; |
|
2793 |
case MSR_KERNELGSBASE: |
|
2794 |
val = env->kernelgsbase; |
|
2795 |
break; |
|
2796 |
case MSR_TSC_AUX: |
|
2797 |
val = env->tsc_aux; |
|
2798 |
break; |
|
2799 |
#endif |
|
2800 |
case MSR_MTRRphysBase(0): |
|
2801 |
case MSR_MTRRphysBase(1): |
|
2802 |
case MSR_MTRRphysBase(2): |
|
2803 |
case MSR_MTRRphysBase(3): |
|
2804 |
case MSR_MTRRphysBase(4): |
|
2805 |
case MSR_MTRRphysBase(5): |
|
2806 |
case MSR_MTRRphysBase(6): |
|
2807 |
case MSR_MTRRphysBase(7): |
|
2808 |
val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base; |
|
2809 |
break; |
|
2810 |
case MSR_MTRRphysMask(0): |
|
2811 |
case MSR_MTRRphysMask(1): |
|
2812 |
case MSR_MTRRphysMask(2): |
|
2813 |
case MSR_MTRRphysMask(3): |
|
2814 |
case MSR_MTRRphysMask(4): |
|
2815 |
case MSR_MTRRphysMask(5): |
|
2816 |
case MSR_MTRRphysMask(6): |
|
2817 |
case MSR_MTRRphysMask(7): |
|
2818 |
val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask; |
|
2819 |
break; |
|
2820 |
case MSR_MTRRfix64K_00000: |
|
2821 |
val = env->mtrr_fixed[0]; |
|
2822 |
break; |
|
2823 |
case MSR_MTRRfix16K_80000: |
|
2824 |
case MSR_MTRRfix16K_A0000: |
|
2825 |
val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1]; |
|
2826 |
break; |
|
2827 |
case MSR_MTRRfix4K_C0000: |
|
2828 |
case MSR_MTRRfix4K_C8000: |
|
2829 |
case MSR_MTRRfix4K_D0000: |
|
2830 |
case MSR_MTRRfix4K_D8000: |
|
2831 |
case MSR_MTRRfix4K_E0000: |
|
2832 |
case MSR_MTRRfix4K_E8000: |
|
2833 |
case MSR_MTRRfix4K_F0000: |
|
2834 |
case MSR_MTRRfix4K_F8000: |
|
2835 |
val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3]; |
|
2836 |
break; |
|
2837 |
case MSR_MTRRdefType: |
|
2838 |
val = env->mtrr_deftype; |
|
2839 |
break; |
|
2840 |
case MSR_MTRRcap: |
|
2841 |
if (env->cpuid_features & CPUID_MTRR) { |
|
2842 |
val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | |
|
2843 |
MSR_MTRRcap_WC_SUPPORTED; |
|
2844 |
} else { |
|
2845 |
/* XXX: exception? */ |
|
2846 |
val = 0; |
|
2847 |
} |
|
2848 |
break; |
|
2849 |
case MSR_MCG_CAP: |
|
2850 |
val = env->mcg_cap; |
|
2851 |
break; |
|
2852 |
case MSR_MCG_CTL: |
|
2853 |
if (env->mcg_cap & MCG_CTL_P) { |
|
2854 |
val = env->mcg_ctl; |
|
2855 |
} else { |
|
2856 |
val = 0; |
|
2857 |
} |
|
2858 |
break; |
|
2859 |
case MSR_MCG_STATUS: |
|
2860 |
val = env->mcg_status; |
|
2861 |
break; |
|
2862 |
case MSR_IA32_MISC_ENABLE: |
|
2863 |
val = env->msr_ia32_misc_enable; |
|
2864 |
break; |
|
2865 |
default: |
|
2866 |
if ((uint32_t)ECX >= MSR_MC0_CTL |
|
2867 |
&& (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) { |
|
2868 |
uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL; |
|
2869 |
val = env->mce_banks[offset]; |
|
2870 |
break; |
|
2871 |
} |
|
2872 |
/* XXX: exception? */ |
|
2873 |
val = 0; |
|
2874 |
break; |
|
2875 |
} |
|
2876 |
EAX = (uint32_t)(val); |
|
2877 |
EDX = (uint32_t)(val >> 32); |
|
2878 |
} |
|
2879 |
#endif |
|
2880 |
|
|
2881 | 2351 |
target_ulong helper_lsl(target_ulong selector1) |
2882 | 2352 |
{ |
2883 | 2353 |
unsigned int limit; |
... | ... | |
3065 | 2535 |
} |
3066 | 2536 |
#endif |
3067 | 2537 |
|
3068 |
/* Put the current virtual CPU into the halted state and leave the
 * execution loop.  Does not return: cpu_loop_exit() longjmps back to
 * the main loop, which will resume this CPU on the next interrupt. */
static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit(env);
}
|
3075 |
|
|
3076 |
/* HLT instruction: advance EIP past the instruction (so execution
 * resumes after HLT on wakeup), then halt via do_hlt().
 * @next_eip_addend: byte length to add to EIP to skip the HLT.
 * Checked against SVM intercepts first; does not return. */
void helper_hlt(int next_eip_addend)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}
|
3083 |
|
|
3084 |
/* MONITOR instruction: #GP unless ECX (extensions hint) is zero.
 * @ptr: linear address to arm the monitor on — currently ignored
 * (see the XXX below); only the SVM intercept is honoured. */
void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0) {
        raise_exception(env, EXCP0D_GPF);
    }
    /* XXX: store address? */
    cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0);
}
|
3092 |
|
|
3093 |
void helper_mwait(int next_eip_addend) |
|
3094 |
{ |
|
3095 |
if ((uint32_t)ECX != 0) { |
|
3096 |
raise_exception(env, EXCP0D_GPF); |
|
3097 |
} |
|
3098 |
cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0); |
|
3099 |
EIP += next_eip_addend; |
|
3100 |
|
|
3101 |
/* XXX: not complete but not completely erroneous */ |
|
3102 |
if (env->cpu_index != 0 || env->next_cpu != NULL) { |
|
3103 |
/* more than one CPU: do not sleep because another CPU may |
|
3104 |
wake this one */ |
|
3105 |
} else { |
|
3106 |
do_hlt(); |
|
3107 |
} |
|
3108 |
} |
|
3109 |
|
|
3110 |
/* Raise a debug event: hand control back to the main loop with
 * EXCP_DEBUG so the debugger (e.g. gdbstub) can take over.
 * Does not return. */
void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit(env);
}
|
3115 |
|
|
3116 | 2538 |
void helper_boundw(target_ulong a0, int v) |
3117 | 2539 |
{ |
3118 | 2540 |
int low, high; |
Also available in: Unified diff