Revision 0573fbfc target-i386/helper.c
b/target-i386/helper.c | ||
---|---|---|
594 | 594 |
int has_error_code, new_stack, shift; |
595 | 595 |
uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2; |
596 | 596 |
uint32_t old_eip, sp_mask; |
597 |
int svm_should_check = 1; |
|
597 | 598 |
|
599 |
if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) { |
|
600 |
next_eip = EIP; |
|
601 |
svm_should_check = 0; |
|
602 |
} |
|
603 |
|
|
604 |
if (svm_should_check |
|
605 |
&& (INTERCEPTEDl(_exceptions, 1 << intno) |
|
606 |
&& !is_int)) { |
|
607 |
raise_interrupt(intno, is_int, error_code, 0); |
|
608 |
} |
|
598 | 609 |
has_error_code = 0; |
599 | 610 |
if (!is_int && !is_hw) { |
600 | 611 |
switch(intno) { |
... | ... | |
830 | 841 |
int has_error_code, new_stack; |
831 | 842 |
uint32_t e1, e2, e3, ss; |
832 | 843 |
target_ulong old_eip, esp, offset; |
844 |
int svm_should_check = 1; |
|
833 | 845 |
|
846 |
if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) { |
|
847 |
next_eip = EIP; |
|
848 |
svm_should_check = 0; |
|
849 |
} |
|
850 |
if (svm_should_check |
|
851 |
&& INTERCEPTEDl(_exceptions, 1 << intno) |
|
852 |
&& !is_int) { |
|
853 |
raise_interrupt(intno, is_int, error_code, 0); |
|
854 |
} |
|
834 | 855 |
has_error_code = 0; |
835 | 856 |
if (!is_int && !is_hw) { |
836 | 857 |
switch(intno) { |
... | ... | |
1077 | 1098 |
int selector; |
1078 | 1099 |
uint32_t offset, esp; |
1079 | 1100 |
uint32_t old_cs, old_eip; |
1101 |
int svm_should_check = 1; |
|
1080 | 1102 |
|
1103 |
if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) { |
|
1104 |
next_eip = EIP; |
|
1105 |
svm_should_check = 0; |
|
1106 |
} |
|
1107 |
if (svm_should_check |
|
1108 |
&& INTERCEPTEDl(_exceptions, 1 << intno) |
|
1109 |
&& !is_int) { |
|
1110 |
raise_interrupt(intno, is_int, error_code, 0); |
|
1111 |
} |
|
1081 | 1112 |
/* real mode (simpler !) */ |
1082 | 1113 |
dt = &env->idt; |
1083 | 1114 |
if (intno * 4 + 3 > dt->limit) |
... | ... | |
1227 | 1258 |
void raise_interrupt(int intno, int is_int, int error_code, |
1228 | 1259 |
int next_eip_addend) |
1229 | 1260 |
{ |
1230 |
if (!is_int) |
|
1261 |
if (!is_int) { |
|
1262 |
svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code); |
|
1231 | 1263 |
intno = check_exception(intno, &error_code); |
1264 |
} |
|
1232 | 1265 |
|
1233 | 1266 |
env->exception_index = intno; |
1234 | 1267 |
env->error_code = error_code; |
... | ... | |
1671 | 1704 |
case 0x80000001: |
1672 | 1705 |
EAX = env->cpuid_features; |
1673 | 1706 |
EBX = 0; |
1674 |
ECX = 0;
|
|
1707 |
ECX = env->cpuid_ext3_features;
|
|
1675 | 1708 |
EDX = env->cpuid_ext2_features; |
1676 | 1709 |
break; |
1677 | 1710 |
case 0x80000002: |
... | ... | |
2745 | 2778 |
case MSR_PAT: |
2746 | 2779 |
env->pat = val; |
2747 | 2780 |
break; |
2781 |
case MSR_VM_HSAVE_PA: |
|
2782 |
env->vm_hsave = val; |
|
2783 |
break; |
|
2748 | 2784 |
#ifdef TARGET_X86_64 |
2749 | 2785 |
case MSR_LSTAR: |
2750 | 2786 |
env->lstar = val; |
... | ... | |
2796 | 2832 |
case MSR_PAT: |
2797 | 2833 |
val = env->pat; |
2798 | 2834 |
break; |
2835 |
case MSR_VM_HSAVE_PA: |
|
2836 |
val = env->vm_hsave; |
|
2837 |
break; |
|
2799 | 2838 |
#ifdef TARGET_X86_64 |
2800 | 2839 |
case MSR_LSTAR: |
2801 | 2840 |
val = env->lstar; |
... | ... | |
3877 | 3916 |
} |
3878 | 3917 |
env = saved_env; |
3879 | 3918 |
} |
3919 |
|
|
3920 |
|
|
3921 |
/* Secure Virtual Machine helpers */ |
|
3922 |
|
|
3923 |
/* STGI: set the Global Interrupt Flag bit in the hidden CPU flags,
   allowing interrupts to be delivered again after VMRUN/CLGI. */
void helper_stgi(void)
{
    env->hflags |= HF_GIF_MASK;
}
|
3927 |
|
|
3928 |
/* CLGI: clear the Global Interrupt Flag bit in the hidden CPU flags,
   blocking interrupt delivery (used around #VMEXIT handling). */
void helper_clgi(void)
{
    env->hflags &= ~HF_GIF_MASK;
}
|
3932 |
|
|
3933 |
#if defined(CONFIG_USER_ONLY)

/* User-mode emulation has no system state to virtualize, so all SVM
   helpers are no-op stubs and no intercept is ever reported. */
void helper_vmrun(target_ulong addr) { }
void helper_vmmcall(void) { }
void helper_vmload(target_ulong addr) { }
void helper_vmsave(target_ulong addr) { }
void helper_skinit(void) { }
void helper_invlpga(void) { }
void vmexit(uint64_t exit_code, uint64_t exit_info_1) { }
/* Always reports "not intercepted" in user mode. */
int svm_check_intercept_param(uint32_t type, uint64_t param)
{
    return 0;
}
|
3946 |
|
|
3947 |
#else |
|
3948 |
|
|
3949 |
/* Convert a VMCB segment attribute word (plus the descriptor base and
   limit fields that share the same CPU word) into the CPU's hidden
   segment-register flags format. */
static inline uint32_t
vmcb2cpu_attrib(uint16_t vmcb_attrib, uint32_t vmcb_base, uint32_t vmcb_limit)
{
    uint32_t attrib;

    /* Type, S, DPL and P: VMCB bits 0-7 -> CPU flag bits 8-15. */
    attrib = (uint32_t)(vmcb_attrib & 0x00ff) << 8;
    /* AVL, L, DB and G: VMCB bits 8-11 -> CPU flag bits 20-23. */
    attrib |= (uint32_t)(vmcb_attrib & 0x0f00) << 12;
    /* Descriptor base 23-16 and 31-24 occupy the same word. */
    attrib |= (vmcb_base >> 16) & 0xff;
    attrib |= vmcb_base & 0xff000000;
    /* Limit 19-16. */
    attrib |= vmcb_limit & 0xf0000;

    return attrib;
}
|
3958 |
|
|
3959 |
/* Inverse of vmcb2cpu_attrib(): pack the attribute bits of the CPU's
   hidden segment flags back into the 12-bit VMCB attribute format.
   Base and limit bits sharing the CPU word are deliberately dropped. */
static inline uint16_t cpu2vmcb_attrib(uint32_t cpu_attrib)
{
    uint16_t type_s_dpl_p = (cpu_attrib >> 8) & 0xff;   /* Type, S, DPL, P */
    uint16_t avl_l_db_g = (cpu_attrib & 0xf00000) >> 12; /* AVL, L, DB, G */

    return type_s_dpl_p | avl_l_db_g;
}
|
3964 |
|
|
3965 |
extern uint8_t *phys_ram_base; |
|
3966 |
/* VMRUN: enter guest mode.
   Saves the host CPU state into the hsave page, loads the guest state
   from the VMCB at @addr, optionally injects a pending event from the
   VMCB event_inj field, and re-enters the CPU loop as the guest.
   Does not return normally: ends in cpu_loop_exit(). */
void helper_vmrun(target_ulong addr)
{
    uint32_t event_inj;
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;
    regs_to_env();

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    SVM_SAVE_SEG(env->vm_hsave, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_DS], ds);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    /* We shift all the intercept bits so we can OR them with the TB
       flags later on */
    env->intercept = (ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept)) << INTERCEPT_INTR) | INTERCEPT_SVM_MASK;
    env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    /* CRs first: the cpu_x86_update_cr* helpers recompute hidden flags */
    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    if (int_ctl & V_INTR_MASKING_MASK) {
        /* virtual interrupt masking: guest gets its own TPR, host IF is
           remembered in the HIF hidden flag */
        env->cr[8] = int_ctl & V_TPR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags |= HF_HIF_MASK;
    }

#ifdef TARGET_X86_64
    env->efer = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
#endif
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;
    CC_DST = 0xffffffff;

    SVM_LOAD_SEG(env->vm_vmcb, ES, es);
    SVM_LOAD_SEG(env->vm_vmcb, CS, cs);
    SVM_LOAD_SEG(env->vm_vmcb, SS, ss);
    SVM_LOAD_SEG(env->vm_vmcb, DS, ds);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
        case TLB_CONTROL_DO_NOTHING:
            break;
        case TLB_CONTROL_FLUSH_ALL_ASID:
            /* FIXME: this is not 100% correct but should work for now */
            tlb_flush(env, 1);
        break;
    }

    /* entering guest mode re-enables GIF */
    helper_stgi();

    regs_to_env();

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
        /* clear the VALID bit so the event is only injected once */
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);

        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "INTR");
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                /* NOTE(review): exception_is_int = 1 for an NMI looks
                   suspicious -- an NMI is not a software interrupt; compare
                   with the EXEPT branch below. Confirm intended behavior. */
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "NMI");
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "EXEPT");
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "SOFT");
                break;
        }
        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
    }
    if (int_ctl & V_IRQ_MASK)
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;

    cpu_loop_exit();
}
|
4128 |
|
|
4129 |
/* VMMCALL: no hypercall interface is implemented; the instruction is
   only logged (the intercept, if enabled, fires elsewhere). */
void helper_vmmcall(void)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmmcall!\n");
}
|
4134 |
|
|
4135 |
/* VMLOAD: load the additional guest state (FS/GS/TR/LDTR and the
   syscall/sysenter MSRs) from the VMCB at physical address @addr into
   the CPU, complementing what VMRUN itself loads. */
void helper_vmload(target_ulong addr)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_LOAD_SEG2(addr, segs[R_FS], fs);
    SVM_LOAD_SEG2(addr, segs[R_GS], gs);
    SVM_LOAD_SEG2(addr, tr, tr);
    SVM_LOAD_SEG2(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}
|
4158 |
|
|
4159 |
/* VMSAVE: mirror of helper_vmload() -- store the additional guest
   state (FS/GS/TR/LDTR and the syscall/sysenter MSRs) from the CPU
   into the VMCB at physical address @addr. */
void helper_vmsave(target_ulong addr)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_SAVE_SEG(addr, segs[R_FS], fs);
    SVM_SAVE_SEG(addr, segs[R_GS], gs);
    SVM_SAVE_SEG(addr, tr, tr);
    SVM_SAVE_SEG(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}
|
4182 |
|
|
4183 |
/* SKINIT: secure init/measured launch is not implemented; the
   instruction is only logged. */
void helper_skinit(void)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"skinit!\n");
}
|
4188 |
|
|
4189 |
/* INVLPGA: flush the whole TLB.  NOTE(review): the architectural
   instruction invalidates a single address for a given ASID; flushing
   everything is presumably a coarse but safe over-approximation --
   confirm this is intentional. */
void helper_invlpga(void)
{
    tlb_flush(env, 0);
}
|
4193 |
|
|
4194 |
/* Check whether the exit reason @type is intercepted by the running
   guest's VMCB; if so, perform the #VMEXIT with @param as exit_info_1.
   Returns 1 when an intercept fired, 0 otherwise.
   (vmexit() ends in cpu_loop_exit(), so the "return 1" after it is
   only reached in builds where vmexit is a stub.) */
int svm_check_intercept_param(uint32_t type, uint64_t param)
{
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (INTERCEPTEDw(_cr_read, (1 << (type - SVM_EXIT_READ_CR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 8:
        if (INTERCEPTEDw(_dr_read, (1 << (type - SVM_EXIT_READ_DR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (INTERCEPTEDw(_cr_write, (1 << (type - SVM_EXIT_WRITE_CR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 8:
        if (INTERCEPTEDw(_dr_write, (1 << (type - SVM_EXIT_WRITE_DR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 16:
        if (INTERCEPTEDl(_exceptions, (1 << (type - SVM_EXIT_EXCP_BASE)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_IOIO:
        if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT)) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
            /* assumes the I/O port number sits in bits 16..31 of @param --
               confirm against the caller's exit_info_1 encoding */
            uint16_t port = (uint16_t) (param >> 16);

            if(ldub_phys(addr + port / 8) & (1 << (port % 8)))
                vmexit(type, param);
        }
        break;

    case SVM_EXIT_MSR:
        if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT)) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            /* The MSR permission map uses two bits per MSR.  T1 is the
               byte offset into the map, T0 the bit offset in that byte. */
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                T0 = (ECX * 2) % 8;
                /* NOTE(review): the two ranges below divide the doubled
                   (2-bits-per-MSR) index by 8; here the undoubled ECX is
                   used, so this presumably should be (ECX * 2) / 8 --
                   confirm against the MSR permission map layout. */
                T1 = ECX / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                T0 = (8192 + ECX - 0xc0000000) * 2;
                T1 = (T0 / 8);
                T0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                T0 = (16384 + ECX - 0xc0010000) * 2;
                T1 = (T0 / 8);
                T0 %= 8;
                break;
            default:
                /* MSR outside the mapped ranges: always intercepted */
                vmexit(type, param);
                return 1;
            }
            /* @param is assumed to be 0 for rdmsr / 1 for wrmsr, selecting
               the read or write permission bit -- confirm with callers.
               NOTE(review): "return 1" is reached even when the permission
               bit is clear and no vmexit was taken -- verify that callers
               expect that. */
            if (ldub_phys(addr + T1) & ((1 << param) << T0))
                vmexit(type, param);
            return 1;
        }
        break;
    default:
        if (INTERCEPTED((1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    }
    return 0;
}
|
4275 |
|
|
4276 |
/* #VMEXIT: leave guest mode.
   Stores the guest state back into the VMCB, reloads the host state
   from the hsave page, records @exit_code / @exit_info_1 in the VMCB
   control area, and resumes the host via cpu_loop_exit().
   Does not return normally. */
void vmexit(uint64_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmexit(%016" PRIx64 ", %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    /* Save the VM state in the vmcb */
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_DS], ds);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    /* int_ctl is assigned here as a side effect of the condition and is
       read again further down for the host CR8 restore */
    if ((int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl))) & V_INTR_MASKING_MASK) {
        /* write the guest's TPR back into the virtual int_ctl field */
        int_ctl &= ~V_TPR_MASK;
        int_ctl |= env->cr[8] & V_TPR_MASK;
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
    }

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags &= ~HF_HIF_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;

    env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    if (int_ctl & V_INTR_MASKING_MASK)
        env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8));
    /* we need to set the efer after the crs so the hidden flags get set properly */
#ifdef TARGET_X86_64
    env->efer = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
#endif

    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    SVM_LOAD_SEG(env->vm_hsave, ES, es);
    SVM_LOAD_SEG(env->vm_hsave, CS, cs);
    SVM_LOAD_SEG(env->vm_hsave, SS, ss);
    SVM_LOAD_SEG(env->vm_hsave, DS, ds);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code_hi), (uint32_t)(exit_code >> 32));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    helper_clgi();
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host.) */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    regs_to_env();
    cpu_loop_exit();
}
|
4397 |
|
|
4398 |
#endif |
Also available in: Unified diff