root / target-i386 / misc_helper.c @ 182735ef
History | View | Annotate | Download (15.9 kB)
1 |
/*
|
---|---|
2 |
* x86 misc helpers
|
3 |
*
|
4 |
* Copyright (c) 2003 Fabrice Bellard
|
5 |
*
|
6 |
* This library is free software; you can redistribute it and/or
|
7 |
* modify it under the terms of the GNU Lesser General Public
|
8 |
* License as published by the Free Software Foundation; either
|
9 |
* version 2 of the License, or (at your option) any later version.
|
10 |
*
|
11 |
* This library is distributed in the hope that it will be useful,
|
12 |
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 |
* Lesser General Public License for more details.
|
15 |
*
|
16 |
* You should have received a copy of the GNU Lesser General Public
|
17 |
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
18 |
*/
|
19 |
|
20 |
#include "cpu.h" |
21 |
#include "exec/ioport.h" |
22 |
#include "helper.h" |
23 |
|
24 |
#if !defined(CONFIG_USER_ONLY)
|
25 |
#include "exec/softmmu_exec.h" |
26 |
#endif /* !defined(CONFIG_USER_ONLY) */ |
27 |
|
28 |
/* check if Port I/O is allowed in TSS */
|
29 |
static inline void check_io(CPUX86State *env, int addr, int size) |
30 |
{ |
31 |
int io_offset, val, mask;
|
32 |
|
33 |
/* TSS must be a valid 32 bit one */
|
34 |
if (!(env->tr.flags & DESC_P_MASK) ||
|
35 |
((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 || |
36 |
env->tr.limit < 103) {
|
37 |
goto fail;
|
38 |
} |
39 |
io_offset = cpu_lduw_kernel(env, env->tr.base + 0x66);
|
40 |
io_offset += (addr >> 3);
|
41 |
/* Note: the check needs two bytes */
|
42 |
if ((io_offset + 1) > env->tr.limit) { |
43 |
goto fail;
|
44 |
} |
45 |
val = cpu_lduw_kernel(env, env->tr.base + io_offset); |
46 |
val >>= (addr & 7);
|
47 |
mask = (1 << size) - 1; |
48 |
/* all bits must be zero to allow the I/O */
|
49 |
if ((val & mask) != 0) { |
50 |
fail:
|
51 |
raise_exception_err(env, EXCP0D_GPF, 0);
|
52 |
} |
53 |
} |
54 |
|
55 |
/* Validate a byte-sized I/O access against the TSS permission bitmap. */
void helper_check_iob(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 1);
}
59 |
|
60 |
/* Validate a word-sized (2-byte) I/O access against the TSS bitmap. */
void helper_check_iow(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 2);
}
64 |
|
65 |
/* Validate a long-sized (4-byte) I/O access against the TSS bitmap. */
void helper_check_iol(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 4);
}
69 |
|
70 |
/* OUT (byte): forward the low 8 bits of 'data' to the emulated port. */
void helper_outb(uint32_t port, uint32_t data)
{
    uint32_t byte_val = data & 0xff;

    cpu_outb(port, byte_val);
}
74 |
|
75 |
/* IN (byte): read one byte from the emulated I/O port. */
target_ulong helper_inb(uint32_t port)
{
    target_ulong in_val = cpu_inb(port);

    return in_val;
}
79 |
|
80 |
/* OUT (word): forward the low 16 bits of 'data' to the emulated port. */
void helper_outw(uint32_t port, uint32_t data)
{
    uint32_t word_val = data & 0xffff;

    cpu_outw(port, word_val);
}
84 |
|
85 |
/* IN (word): read 16 bits from the emulated I/O port. */
target_ulong helper_inw(uint32_t port)
{
    target_ulong in_val = cpu_inw(port);

    return in_val;
}
89 |
|
90 |
/* OUT (long): the full 32-bit operand is written, no masking needed. */
void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}
94 |
|
95 |
/* IN (long): read 32 bits from the emulated I/O port. */
target_ulong helper_inl(uint32_t port)
{
    target_ulong in_val = cpu_inl(port);

    return in_val;
}
99 |
|
100 |
/* INTO: raise the overflow trap (vector 4) when OF is set, otherwise
 * fall through with no effect. */
void helper_into(CPUX86State *env, int next_eip_addend)
{
    int flags = cpu_cc_compute_all(env, CC_OP);

    if (flags & CC_O) {
        raise_interrupt(env, EXCP04_INTO, 1, 0, next_eip_addend);
    }
}
109 |
|
110 |
/* Deliver a single-step debug trap (#DB) after an instruction. */
void helper_single_step(CPUX86State *env)
{
#ifndef CONFIG_USER_ONLY
    /* Refresh DR6 breakpoint status bits and record the single-step
       condition (BS) before raising the exception. */
    check_hw_breakpoints(env, true);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(env, EXCP01_DB);
}
118 |
|
119 |
/* CPUID: query leaf EAX / subleaf ECX and write the result registers. */
void helper_cpuid(CPUX86State *env)
{
    uint32_t out_eax, out_ebx, out_ecx, out_edx;

    cpu_svm_check_intercept_param(env, SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
                  &out_eax, &out_ebx, &out_ecx, &out_edx);

    env->regs[R_EAX] = out_eax;
    env->regs[R_EBX] = out_ebx;
    env->regs[R_ECX] = out_ecx;
    env->regs[R_EDX] = out_edx;
}
132 |
|
133 |
#if defined(CONFIG_USER_ONLY)
/* User-mode emulation: control/debug registers are not modeled.
   Reads return 0 and writes are silently ignored. */
target_ulong helper_read_crN(CPUX86State *env, int reg)
{
    return 0;
}

void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
{
}

void helper_movl_drN_T0(CPUX86State *env, int reg, target_ulong t0)
{
}
#else
/* Read control register 'reg' (MOV reg, CRn), honoring SVM intercepts. */
target_ulong helper_read_crN(CPUX86State *env, int reg)
{
    target_ulong val;

    cpu_svm_check_intercept_param(env, SVM_EXIT_READ_CR0 + reg, 0);
    switch (reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        /* CR8 mirrors the task priority: read from the APIC TPR unless
           SVM virtual-interrupt masking shadows it in v_tpr. */
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env->apic_state);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}

/* Write control register 'reg' (MOV CRn, reg), honoring SVM intercepts.
   CR0/CR3/CR4 go through update helpers that also refresh derived CPU
   state (mode flags, TLB); other CRs are stored directly. */
void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_WRITE_CR0 + reg, 0);
    switch (reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        /* Forward to the APIC unless SVM shadows the TPR; the shadow
           copy keeps only the architecturally defined low 4 bits. */
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            cpu_set_apic_tpr(env->apic_state, t0);
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}

/* Write debug register 'reg'.  Hardware breakpoints affected by the
   write are removed before and re-inserted after updating env->dr[]. */
void helper_movl_drN_T0(CPUX86State *env, int reg, target_ulong t0)
{
    int i;

    if (reg < 4) {
        /* DR0-DR3: re-sync just the one breakpoint slot. */
        hw_breakpoint_remove(env, reg);
        env->dr[reg] = t0;
        hw_breakpoint_insert(env, reg);
    } else if (reg == 7) {
        /* DR7 controls all breakpoints: re-sync every slot. */
        for (i = 0; i < DR7_MAX_BP; i++) {
            hw_breakpoint_remove(env, i);
        }
        env->dr[7] = t0;
        for (i = 0; i < DR7_MAX_BP; i++) {
            hw_breakpoint_insert(env, i);
        }
    } else {
        /* DR4/DR5/DR6: plain store, no breakpoint re-sync. */
        env->dr[reg] = t0;
    }
}
#endif
|
213 |
|
214 |
/* LMSW: load the machine status word.  Only CR0 bits 3:0 can be
 * written, and PE (bit 0) can be set but never cleared once set. */
void helper_lmsw(CPUX86State *env, target_ulong t0)
{
    target_ulong new_cr0 = (env->cr[0] & ~0xe) | (t0 & 0xf);

    helper_write_crN(env, 0, new_cr0);
}
221 |
|
222 |
/* INVLPG: invalidate the TLB entry covering the given linear address. */
void helper_invlpg(CPUX86State *env, target_ulong addr)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}
227 |
|
228 |
/* RDTSC: read the time-stamp counter into EDX:EAX.  Faults with #GP
 * when CR4.TSD is set and the CPU is not at CPL 0. */
void helper_rdtsc(CPUX86State *env)
{
    uint64_t tsc;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(env, EXCP0D_GPF);
    }
    cpu_svm_check_intercept_param(env, SVM_EXIT_RDTSC, 0);

    /* Apply the SVM TSC offset on top of the host-derived counter. */
    tsc = cpu_get_tsc(env) + env->tsc_offset;
    env->regs[R_EAX] = (uint32_t)tsc;
    env->regs[R_EDX] = (uint32_t)(tsc >> 32);
}
241 |
|
242 |
/* RDTSCP: RDTSC plus the TSC_AUX MSR value delivered in ECX. */
void helper_rdtscp(CPUX86State *env)
{
    helper_rdtsc(env);
    env->regs[R_ECX] = (uint32_t)(env->tsc_aux);
}
247 |
|
248 |
/* RDPMC: read a performance-monitoring counter.  Performs the CPL/PCE
 * permission check and SVM intercept, then raises #UD because the
 * counters themselves are not implemented. */
void helper_rdpmc(CPUX86State *env)
{
    int cpl_above_zero = (env->hflags & HF_CPL_MASK) != 0;

    if ((env->cr[4] & CR4_PCE_MASK) && cpl_above_zero) {
        raise_exception(env, EXCP0D_GPF);
    }
    cpu_svm_check_intercept_param(env, SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    qemu_log_mask(LOG_UNIMP, "x86: unimplemented rdpmc\n");
    raise_exception_err(env, EXCP06_ILLOP, 0);
}
259 |
|
260 |
#if defined(CONFIG_USER_ONLY)
/* User-mode emulation: MSRs are not modeled, so WRMSR/RDMSR are no-ops. */
void helper_wrmsr(CPUX86State *env)
{
}

void helper_rdmsr(CPUX86State *env)
{
}
#else
|
269 |
/* WRMSR: write the 64-bit value EDX:EAX to the MSR selected by ECX. */
void helper_wrmsr(CPUX86State *env)
{
    uint64_t val;

    cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1);

    /* Assemble the 64-bit operand from the low halves of EAX and EDX. */
    val = ((uint32_t)env->regs[R_EAX]) |
        ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);

    switch ((uint32_t)env->regs[R_ECX]) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env->apic_state, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;

            /* Only EFER bits whose backing features are advertised in
               CPUID may be changed by the guest; all other bits keep
               their current value.  (A second, redundant check of
               CPUID_EXT2_FFXSR present in the original was removed.) */
            update_mask = 0;
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_SYSCALL) {
                update_mask |= MSR_EFER_SCE;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
                update_mask |= MSR_EFER_LME;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) {
                update_mask |= MSR_EFER_FFXSR;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_NX) {
                update_mask |= MSR_EFER_NXE;
            }
            if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
                update_mask |= MSR_EFER_SVME;
            }
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        /* Variable-range MTRRs come as interleaved base/mask MSR
           pairs, hence the division by 2 to get the pair index. */
        env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                       MSR_MTRRphysBase(0)) / 2].base = val;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                       MSR_MTRRphysMask(0)) / 2].mask = val;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix64K_00000] = val;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        /* Fixed 16K ranges occupy mtrr_fixed slots 1-2. */
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix16K_80000 + 1] = val;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        /* Fixed 4K ranges occupy mtrr_fixed slots 3-10. */
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix4K_C0000 + 3] = val;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = val;
        break;
    case MSR_MCG_STATUS:
        env->mcg_status = val;
        break;
    case MSR_MCG_CTL:
        /* MCG_CTL is only writable when advertised, and only accepts
           "all disabled" (0) or "all enabled" (~0). */
        if ((env->mcg_cap & MCG_CTL_P)
            && (val == 0 || val == ~(uint64_t)0)) {
            env->mcg_ctl = val;
        }
        break;
    case MSR_TSC_AUX:
        env->tsc_aux = val;
        break;
    case MSR_IA32_MISC_ENABLE:
        env->msr_ia32_misc_enable = val;
        break;
    default:
        /* Machine-check bank registers: 4 MSRs per bank, bank count in
           the low byte of mcg_cap.  NOTE(review): the expression parses
           as (4 * mcg_cap) & 0xff, which matches 4 * bank_count only
           for counts <= 63 — kept as-is to preserve behavior. */
        if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
            && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
            (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
            /* MCi_CTL (offset multiple of 4) only accepts 0 or ~0;
               the other bank registers are stored unconditionally. */
            if ((offset & 0x3) != 0
                || (val == 0 || val == ~(uint64_t)0)) {
                env->mce_banks[offset] = val;
            }
            break;
        }
        /* XXX: exception? */
        break;
    }
}
422 |
|
423 |
/* RDMSR: read the MSR selected by ECX and return it in EDX:EAX. */
void helper_rdmsr(CPUX86State *env)
{
    uint64_t val;

    cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0);

    switch ((uint32_t)env->regs[R_ECX]) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env->apic_state);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
    case MSR_TSC_AUX:
        val = env->tsc_aux;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        /* Variable-range MTRRs are interleaved base/mask pairs, hence
           the division by 2 to get the pair index. */
        val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                             MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                             MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        /* Fixed 16K ranges occupy mtrr_fixed slots 1-2. */
        val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                              MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        /* Fixed 4K ranges occupy mtrr_fixed slots 3-10. */
        val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                              MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT |
                MSR_MTRRcap_WC_SUPPORTED;
        } else {
            /* XXX: exception? */
            val = 0;
        }
        break;
    case MSR_MCG_CAP:
        val = env->mcg_cap;
        break;
    case MSR_MCG_CTL:
        /* MCG_CTL reads as 0 when the control MSR is not advertised. */
        if (env->mcg_cap & MCG_CTL_P) {
            val = env->mcg_ctl;
        } else {
            val = 0;
        }
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    case MSR_IA32_MISC_ENABLE:
        val = env->msr_ia32_misc_enable;
        break;
    default:
        /* Machine-check bank registers: 4 MSRs per bank, bank count in
           the low byte of mcg_cap (see matching range check in
           helper_wrmsr). */
        if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
            && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
            (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception? */
        val = 0;
        break;
    }
    env->regs[R_EAX] = (uint32_t)(val);
    env->regs[R_EDX] = (uint32_t)(val >> 32);
}
#endif
|
568 |
|
569 |
/* Put the CPU into the halted state and leave the execution loop. */
static void do_hlt(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    /* Clear the STI interrupt shadow in case HLT follows an STI. */
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    cs->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit(env);
}
579 |
|
580 |
/* HLT: advance EIP past the instruction, then halt the CPU. */
void helper_hlt(CPUX86State *env, int next_eip_addend)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0);
    env->eip += next_eip_addend;

    do_hlt(x86_env_get_cpu(env));
}
589 |
|
590 |
/* MONITOR: only the ECX == 0 form is supported; the monitored address
 * itself is not recorded. */
void helper_monitor(CPUX86State *env, target_ulong ptr)
{
    if ((uint32_t)env->regs[R_ECX] != 0) {
        raise_exception(env, EXCP0D_GPF);
    }
    /* XXX: store address? */
    cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0);
}
598 |
|
599 |
/* MWAIT: treated as HLT on a uniprocessor guest; on SMP guests it is a
 * no-op so that another CPU can still wake this one. */
void helper_mwait(CPUX86State *env, int next_eip_addend)
{
    X86CPU *cpu;
    CPUState *cs;

    if ((uint32_t)env->regs[R_ECX] != 0) {
        raise_exception(env, EXCP0D_GPF);
    }
    cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0);
    env->eip += next_eip_addend;

    cpu = x86_env_get_cpu(env);
    cs = CPU(cpu);
    /* XXX: not complete but not completely erroneous */
    if (cs->cpu_index == 0 && cs->next_cpu == NULL) {
        /* Single CPU: safe to actually sleep. */
        do_hlt(cpu);
    }
    /* More than one CPU: do not sleep, another CPU may need to wake
       this one. */
}
620 |
|
621 |
/* Hand control back to the debugger by leaving the CPU loop with the
 * EXCP_DEBUG pseudo-exception. */
void helper_debug(CPUX86State *env)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit(env);
}