Revision a78d0eab
b/target-i386/cpu.h | ||
---|---|---|
1101 | 1101 |
? MMU_KSMAP_IDX : MMU_KERNEL_IDX; |
1102 | 1102 |
} |
1103 | 1103 |
|
1104 | - #undef EIP
1105 | - #define EIP (env->eip)
1106 | 1104 |
#define DF (env->df) |
1107 | 1105 |
|
1108 | 1106 |
#define CC_DST (env->cc_dst) |
b/target-i386/excp_helper.c | ||
---|---|---|
87 | 87 |
/* |
88 | 88 |
* Signal an interruption. It is executed in the main CPU loop. |
89 | 89 |
* is_int is TRUE if coming from the int instruction. next_eip is the |
90 | - * EIP value AFTER the interrupt instruction. It is only relevant if
90 | + * env->eip value AFTER the interrupt instruction. It is only relevant if
91 | 91 |
* is_int is TRUE. |
92 | 92 |
*/ |
93 | 93 |
static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno, |
b/target-i386/misc_helper.c | ||
---|---|---|
569 | 569 |
X86CPU *cpu = x86_env_get_cpu(env); |
570 | 570 |
|
571 | 571 |
cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0); |
572 | - EIP += next_eip_addend;
572 | + env->eip += next_eip_addend;
573 | 573 |
|
574 | 574 |
do_hlt(cpu); |
575 | 575 |
} |
... | ... | |
592 | 592 |
raise_exception(env, EXCP0D_GPF); |
593 | 593 |
} |
594 | 594 |
cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0); |
595 | - EIP += next_eip_addend;
595 | + env->eip += next_eip_addend;
596 | 596 |
|
597 | 597 |
cpu = x86_env_get_cpu(env); |
598 | 598 |
cs = CPU(cpu); |
b/target-i386/seg_helper.c | ||
---|---|---|
457 | 457 |
tss_load_seg(env, R_GS, new_segs[R_GS]); |
458 | 458 |
} |
459 | 459 |
|
460 | - /* check that EIP is in the CS segment limits */
460 | + /* check that env->eip is in the CS segment limits */
461 | 461 |
if (new_eip > env->segs[R_CS].limit) { |
462 | 462 |
/* XXX: different exception if CALL? */ |
463 | 463 |
raise_exception_err(env, EXCP0D_GPF, 0); |
... | ... | |
1122 | 1122 |
exiting the emulation with the suitable exception and error |
1123 | 1123 |
code */ |
1124 | 1124 |
if (is_int) { |
1125 | - EIP = next_eip;
1125 | + env->eip = next_eip;
1126 | 1126 |
} |
1127 | 1127 |
} |
1128 | 1128 |
|
... | ... | |
1157 | 1157 |
|
1158 | 1158 |
/* |
1159 | 1159 |
* Begin execution of an interruption. is_int is TRUE if coming from |
1160 | - * the int instruction. next_eip is the EIP value AFTER the interrupt
1160 | + * the int instruction. next_eip is the env->eip value AFTER the interrupt
1161 | 1161 |
* instruction. It is only relevant if is_int is TRUE. |
1162 | 1162 |
*/ |
1163 | 1163 |
static void do_interrupt_all(CPUX86State *env, int intno, int is_int, |
... | ... | |
1171 | 1171 |
" pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx, |
1172 | 1172 |
count, intno, error_code, is_int, |
1173 | 1173 |
env->hflags & HF_CPL_MASK, |
1174 | - env->segs[R_CS].selector, EIP,
1175 | - (int)env->segs[R_CS].base + EIP,
1174 | + env->segs[R_CS].selector, env->eip,
1175 | + (int)env->segs[R_CS].base + env->eip,
1176 | 1176 |
env->segs[R_SS].selector, env->regs[R_ESP]); |
1177 | 1177 |
if (intno == 0x0e) { |
1178 | 1178 |
qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]); |
... | ... | |
1584 | 1584 |
} |
1585 | 1585 |
cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, |
1586 | 1586 |
get_seg_base(e1, e2), limit, e2); |
1587 | - EIP = new_eip;
1587 | + env->eip = new_eip;
1588 | 1588 |
} else { |
1589 | 1589 |
/* jump to call or task gate */ |
1590 | 1590 |
dpl = (e2 >> DESC_DPL_SHIFT) & 3; |
... | ... | |
1637 | 1637 |
} |
1638 | 1638 |
cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl, |
1639 | 1639 |
get_seg_base(e1, e2), limit, e2); |
1640 | - EIP = new_eip;
1640 | + env->eip = new_eip;
1641 | 1641 |
break; |
1642 | 1642 |
default: |
1643 | 1643 |
raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); |
... | ... | |
1731 | 1731 |
cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, |
1732 | 1732 |
get_seg_base(e1, e2), |
1733 | 1733 |
get_seg_limit(e1, e2), e2); |
1734 | - EIP = new_eip;
1734 | + env->eip = new_eip;
1735 | 1735 |
} else |
1736 | 1736 |
#endif |
1737 | 1737 |
{ |
... | ... | |
1754 | 1754 |
SET_ESP(sp, sp_mask); |
1755 | 1755 |
cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, |
1756 | 1756 |
get_seg_base(e1, e2), limit, e2); |
1757 | - EIP = new_eip;
1757 | + env->eip = new_eip;
1758 | 1758 |
} |
1759 | 1759 |
} else { |
1760 | 1760 |
/* check gate type */ |
... | ... | |
1895 | 1895 |
e2); |
1896 | 1896 |
cpu_x86_set_cpl(env, dpl); |
1897 | 1897 |
SET_ESP(sp, sp_mask); |
1898 | - EIP = offset;
1898 | + env->eip = offset;
1899 | 1899 |
} |
1900 | 1900 |
} |
1901 | 1901 |
|
... | ... | |
2251 | 2251 |
DESC_S_MASK | |
2252 | 2252 |
DESC_W_MASK | DESC_A_MASK); |
2253 | 2253 |
env->regs[R_ESP] = env->sysenter_esp; |
2254 | - EIP = env->sysenter_eip;
2254 | + env->eip = env->sysenter_eip;
2255 | 2255 |
} |
2256 | 2256 |
|
2257 | 2257 |
void helper_sysexit(CPUX86State *env, int dflag) |
... | ... | |
2291 | 2291 |
DESC_W_MASK | DESC_A_MASK); |
2292 | 2292 |
} |
2293 | 2293 |
env->regs[R_ESP] = env->regs[R_ECX]; |
2294 | - EIP = env->regs[R_EDX];
2294 | + env->eip = env->regs[R_EDX];
2295 | 2295 |
} |
2296 | 2296 |
|
2297 | 2297 |
target_ulong helper_lsl(CPUX86State *env, target_ulong selector1) |
b/target-i386/svm_helper.c | ||
---|---|---|
170 | 170 |
&env->segs[R_DS]); |
171 | 171 |
|
172 | 172 |
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), |
173 | - EIP + next_eip_addend);
173 | + env->eip + next_eip_addend);
174 | 174 |
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]); |
175 | 175 |
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]); |
176 | 176 |
|
... | ... | |
248 | 248 |
svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds), |
249 | 249 |
R_DS); |
250 | 250 |
|
251 | - EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
252 | - env->eip = EIP;
251 | + env->eip = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
252 | + env->eip = env->eip;
(note: the resulting `env->eip = env->eip;` self-assignment is a redundant leftover of the mechanical EIP replacement at line 252)
253 | 253 |
env->regs[R_ESP] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp)); |
254 | 254 |
env->regs[R_EAX] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax)); |
255 | 255 |
env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7)); |
... | ... | |
302 | 302 |
env->exception_index = EXCP02_NMI; |
303 | 303 |
env->error_code = event_inj_err; |
304 | 304 |
env->exception_is_int = 0; |
305 | - env->exception_next_eip = EIP;
305 | + env->exception_next_eip = env->eip;
306 | 306 |
qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI"); |
307 | 307 |
cpu_loop_exit(env); |
308 | 308 |
break; |
... | ... | |
318 | 318 |
env->exception_index = vector; |
319 | 319 |
env->error_code = event_inj_err; |
320 | 320 |
env->exception_is_int = 1; |
321 | - env->exception_next_eip = EIP;
321 | + env->exception_next_eip = env->eip;
322 | 322 |
qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT"); |
323 | 323 |
cpu_loop_exit(env); |
324 | 324 |
break; |
... | ... | |
539 | 539 |
uint16_t mask = (1 << ((param >> 4) & 7)) - 1; |
540 | 540 |
|
541 | 541 |
if (lduw_phys(addr + port / 8) & (mask << (port & 7))) { |
542 | - /* next EIP */
542 | + /* next env->eip */
543 | 543 |
stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), |
544 | 544 |
env->eip + next_eip_addend); |
545 | 545 |
helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16)); |
... | ... | |
558 | 558 |
exit_code, exit_info_1, |
559 | 559 |
ldq_phys(env->vm_vmcb + offsetof(struct vmcb, |
560 | 560 |
control.exit_info_2)), |
561 | - EIP);
561 | + env->eip);
562 | 562 |
|
563 | 563 |
if (env->hflags & HF_INHIBIT_IRQ_MASK) { |
564 | 564 |
stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), |
... | ... | |
657 | 657 |
svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds), |
658 | 658 |
R_DS); |
659 | 659 |
|
660 | - EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
660 | + env->eip = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
661 | 661 |
env->regs[R_ESP] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp)); |
662 | 662 |
env->regs[R_EAX] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax)); |
663 | 663 |
|
Also available in: Unified diff