Revision 872929aa

b/cpu-exec.c
171 171
#if defined(TARGET_I386)
172 172
    flags = env->hflags;
173 173
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
174
    flags |= env->intercept;
175 174
    cs_base = env->segs[R_CS].base;
176 175
    pc = cs_base + env->eip;
177 176
#elif defined(TARGET_ARM)
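Note on this hunk: env->intercept is no longer folded into the translation-block flags. With this revision, active SVM intercepts are signalled by a single hflags bit (HF_SVMI_MASK, added in cpu.h below), so translated code stays generic and the per-instruction decision is deferred to helper_svm_check_intercept_param() at run time. For reference, the resulting flag computation, reusing the names from the hunk above:

    /* sketch: x86 TB flags after this change; env->intercept is not mixed in */
    flags = env->hflags;   /* HF_SVMI_MASK is set here while a guest runs */
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;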
b/target-i386/TODO
1 1
Correctness issues:
2 2

  
3 3
- some eflags manipulations incorrectly reset the bit 0x2.
4
- SVM: rework the implementation: simplify code, move most intercept
5
  tests as dynamic, correct segment access, verify exception safety,
6
  cpu save/restore, SMM save/restore. 
4
- SVM: test, cpu save/restore, SMM save/restore. 
7 5
- x86_64: lcall/ljmp intel/amd differences ?
8 6
- better code fetch (different exception handling + CS.limit support)
9 7
- user/kernel PUSHL/POPL in helper.c
b/target-i386/cpu.h
149 149
#define HF_GIF_SHIFT        20 /* if set CPU takes interrupts */
150 150
#define HF_HIF_SHIFT        21 /* shadow copy of IF_MASK when in SVM */
151 151
#define HF_NMI_SHIFT        22 /* CPU serving NMI */
152
#define HF_SVME_SHIFT       23 /* SVME enabled (copy of EFER.SVME) */
153
#define HF_SVMI_SHIFT       24 /* SVM intercepts are active */
152 154

  
153 155
#define HF_CPL_MASK          (3 << HF_CPL_SHIFT)
154 156
#define HF_SOFTMMU_MASK      (1 << HF_SOFTMMU_SHIFT)
......
169 171
#define HF_GIF_MASK          (1 << HF_GIF_SHIFT)
170 172
#define HF_HIF_MASK          (1 << HF_HIF_SHIFT)
171 173
#define HF_NMI_MASK          (1 << HF_NMI_SHIFT)
174
#define HF_SVME_MASK         (1 << HF_SVME_SHIFT)
175
#define HF_SVMI_MASK         (1 << HF_SVMI_SHIFT)
172 176

  
173 177
#define CR0_PE_MASK  (1 << 0)
174 178
#define CR0_MP_MASK  (1 << 1)
......
242 246
#define MSR_EFER_LME   (1 << 8)
243 247
#define MSR_EFER_LMA   (1 << 10)
244 248
#define MSR_EFER_NXE   (1 << 11)
249
#define MSR_EFER_SVME  (1 << 12)
245 250
#define MSR_EFER_FFXSR (1 << 14)
246 251

  
247 252
#define MSR_STAR                        0xc0000081
......
322 327
#define CPUID_EXT3_3DNOWPREFETCH (1 << 8)
323 328
#define CPUID_EXT3_OSVW    (1 << 9)
324 329
#define CPUID_EXT3_IBS     (1 << 10)
330
#define CPUID_EXT3_SKINIT  (1 << 12)
325 331

  
326 332
#define EXCP00_DIVZ	0
327 333
#define EXCP01_SSTP	1
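The two new hidden-flag bits split responsibilities: HF_SVME mirrors EFER.SVME (are the SVM instructions architecturally enabled at all?), while HF_SVMI means a VMCB is currently active and runtime intercept checks must run. Keeping them in hflags lets both the translator (s->flags) and the helpers test a cheap bit instead of rereading the VMCB. A hedged illustration of how an EFER update could keep HF_SVME in sync; the helper name update_svme_hflag is made up for this example, only the masks and fields come from this header:

    /* illustrative only: resynchronize HF_SVME_MASK after EFER.SVME changes */
    static void update_svme_hflag(CPUState *env)
    {
        env->hflags &= ~HF_SVME_MASK;
        if ((env->cpuid_ext3_features & CPUID_EXT3_SVM) &&
            (env->efer & MSR_EFER_SVME))
            env->hflags |= HF_SVME_MASK;
    }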
b/target-i386/helper.c
1096 1096
        (env->efer & MSR_EFER_NXE) &&
1097 1097
        (env->cr[4] & CR4_PAE_MASK))
1098 1098
        error_code |= PG_ERROR_I_D_MASK;
1099
    if (INTERCEPTEDl(_exceptions, 1 << EXCP0E_PAGE)) {
1100
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), addr);
1099
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
1100
        /* cr2 is not modified in case of exceptions */
1101
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 
1102
                 addr);
1101 1103
    } else {
1102 1104
        env->cr[2] = addr;
1103 1105
    }
1104 1106
    env->error_code = error_code;
1105 1107
    env->exception_index = EXCP0E_PAGE;
1106
    /* the VMM will handle this */
1107
    if (INTERCEPTEDl(_exceptions, 1 << EXCP0E_PAGE))
1108
        return 2;
1109 1108
    return 1;
1110 1109
}
1111 1110
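In the page-fault path above, an intercepted #PF (bit EXCP0E_PAGE in intercept_exceptions) now reports the faulting address through exit_info_2 and deliberately leaves CR2 untouched, as the new comment states. The old special return value 2 is gone: the actual #VMEXIT, if any, is taken later via helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code) when the exception is delivered. The test pattern used throughout the patch is simply:

    /* sketch: per-exception intercept test for vector n */
    if (env->intercept_exceptions & (1 << n)) {
        /* report through the VMCB instead of touching guest state */
    }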

  
b/target-i386/helper.h
43 43
DEF_HELPER(void, helper_iret_real, (int shift))
44 44
DEF_HELPER(void, helper_iret_protected, (int shift, int next_eip))
45 45
DEF_HELPER(void, helper_lret_protected, (int shift, int addend))
46
DEF_HELPER(void, helper_movl_crN_T0, (int reg, target_ulong t0))
46
DEF_HELPER(target_ulong, helper_read_crN, (int reg))
47
DEF_HELPER(void, helper_write_crN, (int reg, target_ulong t0))
47 48
DEF_HELPER(void, helper_lmsw, (target_ulong t0))
48 49
DEF_HELPER(void, helper_clts, (void))
49 50
#if !defined(CONFIG_USER_ONLY)
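helper_movl_crN_T0 is replaced by a read/write pair so that control-register reads can be intercepted too (SVM_EXIT_READ_CR0 + reg) and so CR8 reads go through cpu_get_apic_tpr(). A hedged sketch of what the translator might now emit for a CRn read; the one-result wrapper and the register move are assumed by analogy with the tcg_gen_helper_0_* calls and gen_op_* helpers visible elsewhere in this diff:

    /* illustrative: mov reg, crN becomes a runtime helper call */
    tcg_gen_helper_1_1(helper_read_crN, cpu_T[0], tcg_const_i32(reg));
    gen_op_mov_reg_T0(ot, rm);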
b/target-i386/op_helper.c
625 625
    int has_error_code, new_stack, shift;
626 626
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
627 627
    uint32_t old_eip, sp_mask;
628
    int svm_should_check = 1;
629 628

  
630
    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
631
        next_eip = EIP;
632
        svm_should_check = 0;
633
    }
634

  
635
    if (svm_should_check
636
        && (INTERCEPTEDl(_exceptions, 1 << intno)
637
        && !is_int)) {
638
        raise_interrupt(intno, is_int, error_code, 0);
639
    }
640 629
    has_error_code = 0;
641 630
    if (!is_int && !is_hw) {
642 631
        switch(intno) {
......
872 861
    int has_error_code, new_stack;
873 862
    uint32_t e1, e2, e3, ss;
874 863
    target_ulong old_eip, esp, offset;
875
    int svm_should_check = 1;
876 864

  
877
    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
878
        next_eip = EIP;
879
        svm_should_check = 0;
880
    }
881
    if (svm_should_check
882
        && INTERCEPTEDl(_exceptions, 1 << intno)
883
        && !is_int) {
884
        raise_interrupt(intno, is_int, error_code, 0);
885
    }
886 865
    has_error_code = 0;
887 866
    if (!is_int && !is_hw) {
888 867
        switch(intno) {
......
1139 1118
    int selector;
1140 1119
    uint32_t offset, esp;
1141 1120
    uint32_t old_cs, old_eip;
1142
    int svm_should_check = 1;
1143 1121

  
1144
    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
1145
        next_eip = EIP;
1146
        svm_should_check = 0;
1147
    }
1148
    if (svm_should_check
1149
        && INTERCEPTEDl(_exceptions, 1 << intno)
1150
        && !is_int) {
1151
        raise_interrupt(intno, is_int, error_code, 0);
1152
    }
1153 1122
    /* real mode (simpler !) */
1154 1123
    dt = &env->idt;
1155 1124
    if (intno * 4 + 3 > dt->limit)
......
1307 1276
    if (!is_int) {
1308 1277
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1309 1278
        intno = check_exception(intno, &error_code);
1279
    } else {
1280
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1310 1281
    }
1311 1282

  
1312 1283
    env->exception_index = intno;
......
1316 1287
    cpu_loop_exit();
1317 1288
}
1318 1289

  
1319
/* same as raise_exception_err, but do not restore global registers */
1320
static void raise_exception_err_norestore(int exception_index, int error_code)
1321
{
1322
    exception_index = check_exception(exception_index, &error_code);
1323

  
1324
    env->exception_index = exception_index;
1325
    env->error_code = error_code;
1326
    env->exception_is_int = 0;
1327
    env->exception_next_eip = 0;
1328
    longjmp(env->jmp_env, 1);
1329
}
1330

  
1331 1290
/* shortcuts to generate exceptions */
1332 1291

  
1333 1292
void (raise_exception_err)(int exception_index, int error_code)
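The hunks above drop the svm_should_check/INTERCEPTEDl pre-checks from all three do_interrupt variants: intercepts are now taken centrally in the raise_interrupt() path (SVM_EXIT_EXCP_BASE + intno for exceptions, SVM_EXIT_SWINT for software interrupts), and raise_exception_err_norestore() disappears because tlb_fill() further down always goes through raise_exception_err(). Condensed from the new code:

    /* sketch: delivery-side intercept handling after this change */
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }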
......
1921 1880
void helper_cpuid(void)
1922 1881
{
1923 1882
    uint32_t index;
1924
    index = (uint32_t)EAX;
1925 1883

  
1884
    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
1885
    
1886
    index = (uint32_t)EAX;
1926 1887
    /* test if maximum index reached */
1927 1888
    if (index & 0x80000000) {
1928 1889
        if (index > env->cpuid_xlevel)
......
2957 2918
#endif
2958 2919
}
2959 2920

  
2960
void helper_movl_crN_T0(int reg, target_ulong t0)
2921
#if defined(CONFIG_USER_ONLY)
2922
target_ulong helper_read_crN(int reg)
2961 2923
{
2962
#if !defined(CONFIG_USER_ONLY)
2924
    return 0;
2925
}
2926

  
2927
void helper_write_crN(int reg, target_ulong t0)
2928
{
2929
}
2930
#else
2931
target_ulong helper_read_crN(int reg)
2932
{
2933
    target_ulong val;
2934

  
2935
    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
2936
    switch(reg) {
2937
    default:
2938
        val = env->cr[reg];
2939
        break;
2940
    case 8:
2941
        val = cpu_get_apic_tpr(env);
2942
        break;
2943
    }
2944
    return val;
2945
}
2946

  
2947
void helper_write_crN(int reg, target_ulong t0)
2948
{
2949
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
2963 2950
    switch(reg) {
2964 2951
    case 0:
2965 2952
        cpu_x86_update_cr0(env, t0);
......
2978 2965
        env->cr[reg] = t0;
2979 2966
        break;
2980 2967
    }
2981
#endif
2982 2968
}
2969
#endif
2983 2970

  
2984 2971
void helper_lmsw(target_ulong t0)
2985 2972
{
2986 2973
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2987 2974
       if already set to one. */
2988 2975
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
2989
    helper_movl_crN_T0(0, t0);
2976
    helper_write_crN(0, t0);
2990 2977
}
2991 2978

  
2992 2979
void helper_clts(void)
......
3010 2997

  
3011 2998
void helper_invlpg(target_ulong addr)
3012 2999
{
3000
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3013 3001
    cpu_x86_flush_tlb(env, addr);
3014 3002
}
3015 3003

  
......
3020 3008
    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3021 3009
        raise_exception(EXCP0D_GPF);
3022 3010
    }
3011
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3012

  
3023 3013
    val = cpu_get_tsc(env);
3024 3014
    EAX = (uint32_t)(val);
3025 3015
    EDX = (uint32_t)(val >> 32);
......
3030 3020
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3031 3021
        raise_exception(EXCP0D_GPF);
3032 3022
    }
3033

  
3034 3023
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3035 3024
    
3036 3025
    /* currently unimplemented */
......
3050 3039
{
3051 3040
    uint64_t val;
3052 3041

  
3042
    helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3043

  
3053 3044
    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3054 3045

  
3055 3046
    switch((uint32_t)ECX) {
......
3119 3110
void helper_rdmsr(void)
3120 3111
{
3121 3112
    uint64_t val;
3113

  
3114
    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3115

  
3122 3116
    switch((uint32_t)ECX) {
3123 3117
    case MSR_IA32_SYSENTER_CS:
3124 3118
        val = env->sysenter_cs;
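rdmsr and wrmsr now check the MSR intercept themselves; the parameter encodes the access direction the same way exit_info_1 does (0 = read, 1 = write), which the msrpm lookup in helper_svm_check_intercept_param() can use to select the read or write permission bit:

    /* sketch: direction is passed as the intercept parameter */
    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);   /* rdmsr */
    helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);   /* wrmsr */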
......
4549 4543

  
4550 4544
void helper_hlt(void)
4551 4545
{
4546
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
4547
    
4552 4548
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4553 4549
    env->hflags |= HF_HALTED_MASK;
4554 4550
    env->exception_index = EXCP_HLT;
......
4560 4556
    if ((uint32_t)ECX != 0)
4561 4557
        raise_exception(EXCP0D_GPF);
4562 4558
    /* XXX: store address ? */
4559
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
4563 4560
}
4564 4561

  
4565 4562
void helper_mwait(void)
4566 4563
{
4567 4564
    if ((uint32_t)ECX != 0)
4568 4565
        raise_exception(EXCP0D_GPF);
4566
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
4569 4567
    /* XXX: not complete but not completely erroneous */
4570 4568
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
4571 4569
        /* more than one CPU: do not sleep because another CPU may
......
4706 4704
                cpu_restore_state(tb, env, pc, NULL);
4707 4705
            }
4708 4706
        }
4709
        if (retaddr)
4710
            raise_exception_err(env->exception_index, env->error_code);
4711
        else
4712
            raise_exception_err_norestore(env->exception_index, env->error_code);
4707
        raise_exception_err(env->exception_index, env->error_code);
4713 4708
    }
4714 4709
    env = saved_env;
4715 4710
}
......
4717 4712

  
4718 4713
/* Secure Virtual Machine helpers */
4719 4714

  
4720
void helper_stgi(void)
4721
{
4722
    env->hflags |= HF_GIF_MASK;
4723
}
4724

  
4725
void helper_clgi(void)
4726
{
4727
    env->hflags &= ~HF_GIF_MASK;
4728
}
4729

  
4730 4715
#if defined(CONFIG_USER_ONLY)
4731 4716

  
4732 4717
void helper_vmrun(void) 
......
4741 4726
void helper_vmsave(void) 
4742 4727
{ 
4743 4728
}
4729
void helper_stgi(void)
4730
{
4731
}
4732
void helper_clgi(void)
4733
{
4734
}
4744 4735
void helper_skinit(void) 
4745 4736
{ 
4746 4737
}
......
4760 4751
}
4761 4752
#else
4762 4753

  
4763
static inline uint32_t
4764
vmcb2cpu_attrib(uint16_t vmcb_attrib, uint32_t vmcb_base, uint32_t vmcb_limit)
4754
static inline void svm_save_seg(target_phys_addr_t addr,
4755
                                const SegmentCache *sc)
4765 4756
{
4766
    return    ((vmcb_attrib & 0x00ff) << 8)          /* Type, S, DPL, P */
4767
	    | ((vmcb_attrib & 0x0f00) << 12)         /* AVL, L, DB, G */
4768
	    | ((vmcb_base >> 16) & 0xff)             /* Base 23-16 */
4769
	    | (vmcb_base & 0xff000000)               /* Base 31-24 */
4770
	    | (vmcb_limit & 0xf0000);                /* Limit 19-16 */
4757
    stw_phys(addr + offsetof(struct vmcb_seg, selector), 
4758
             sc->selector);
4759
    stq_phys(addr + offsetof(struct vmcb_seg, base), 
4760
             sc->base);
4761
    stl_phys(addr + offsetof(struct vmcb_seg, limit), 
4762
             sc->limit);
4763
    stw_phys(addr + offsetof(struct vmcb_seg, attrib), 
4764
             (sc->flags >> 8) | ((sc->flags >> 12) & 0x0f00));
4765
}
4766
                                
4767
static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
4768
{
4769
    unsigned int flags;
4770

  
4771
    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
4772
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
4773
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
4774
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
4775
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
4771 4776
}
4772 4777

  
4773
static inline uint16_t cpu2vmcb_attrib(uint32_t cpu_attrib)
4778
static inline void svm_load_seg_cache(target_phys_addr_t addr, 
4779
                                      CPUState *env, int seg_reg)
4774 4780
{
4775
    return    ((cpu_attrib >> 8) & 0xff)             /* Type, S, DPL, P */
4776
	    | ((cpu_attrib & 0xf00000) >> 12);       /* AVL, L, DB, G */
4781
    SegmentCache sc1, *sc = &sc1;
4782
    svm_load_seg(addr, sc);
4783
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
4784
                           sc->base, sc->limit, sc->flags);
4777 4785
}
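The open-coded SVM_SAVE_SEG/SVM_LOAD_SEG macros give way to these helpers, which also make the attribute conversion explicit: the VMCB keeps a packed 12-bit attribute, while the CPU segment cache keeps the descriptor layout. Condensed from the two functions above:

    /* VMCB attrib bits 0-7  (type,S,DPL,P) <-> CPU flags bits 8-15
       VMCB attrib bits 8-11 (AVL,L,DB,G)   <-> CPU flags bits 20-23 */
    attrib = (sc->flags >> 8) | ((sc->flags >> 12) & 0x0f00);      /* save */
    flags  = ((attrib & 0xff) << 8) | ((attrib & 0x0f00) << 12);   /* load */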
4778 4786

  
4779 4787
void helper_vmrun(void)
......
4782 4790
    uint32_t event_inj;
4783 4791
    uint32_t int_ctl;
4784 4792

  
4793
    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4794

  
4785 4795
    addr = EAX;
4786 4796
    if (loglevel & CPU_LOG_TB_IN_ASM)
4787 4797
        fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
......
4806 4816
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4807 4817
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4808 4818

  
4809
    SVM_SAVE_SEG(env->vm_hsave, segs[R_ES], es);
4810
    SVM_SAVE_SEG(env->vm_hsave, segs[R_CS], cs);
4811
    SVM_SAVE_SEG(env->vm_hsave, segs[R_SS], ss);
4812
    SVM_SAVE_SEG(env->vm_hsave, segs[R_DS], ds);
4819
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es), 
4820
                  &env->segs[R_ES]);
4821
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs), 
4822
                 &env->segs[R_CS]);
4823
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss), 
4824
                 &env->segs[R_SS]);
4825
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds), 
4826
                 &env->segs[R_DS]);
4813 4827

  
4814 4828
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
4815 4829
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
......
4817 4831

  
4818 4832
    /* load the interception bitmaps so we do not need to access the
4819 4833
       vmcb in svm mode */
4820
    /* We shift all the intercept bits so we can OR them with the TB
4821
       flags later on */
4822
    env->intercept            = (ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept)) << INTERCEPT_INTR) | INTERCEPT_SVM_MASK;
4834
    env->intercept            = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
4823 4835
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4824 4836
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4825 4837
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4826 4838
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4827 4839
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
4828 4840

  
4841
    /* enable intercepts */
4842
    env->hflags |= HF_SVMI_MASK;
4843

  
4829 4844
    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4830 4845
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
4831 4846

  
......
4857 4872
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
4858 4873
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4859 4874
    CC_OP = CC_OP_EFLAGS;
4860
    CC_DST = 0xffffffff;
4861 4875

  
4862
    SVM_LOAD_SEG(env->vm_vmcb, ES, es);
4863
    SVM_LOAD_SEG(env->vm_vmcb, CS, cs);
4864
    SVM_LOAD_SEG(env->vm_vmcb, SS, ss);
4865
    SVM_LOAD_SEG(env->vm_vmcb, DS, ds);
4876
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
4877
                       env, R_ES);
4878
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
4879
                       env, R_CS);
4880
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
4881
                       env, R_SS);
4882
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
4883
                       env, R_DS);
4866 4884

  
4867 4885
    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
4868 4886
    env->eip = EIP;
......
4933 4951
        if (loglevel & CPU_LOG_TB_IN_ASM)
4934 4952
            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
4935 4953
    }
4936
    if ((int_ctl & V_IRQ_MASK) || (env->intercept & INTERCEPT_VINTR)) {
4954
    if ((int_ctl & V_IRQ_MASK) || 
4955
        (env->intercept & (1ULL << (SVM_EXIT_INTR - SVM_EXIT_INTR)))) {
4937 4956
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
4938 4957
    }
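helper_vmrun() now stores the intercept word exactly as read from the VMCB (no more shifting it to line up with TB flags), saves and loads segments through the new helpers, and marks guest mode by setting HF_SVMI_MASK; the vmexit path clears it again when reloading the host state from vm_hsave. That single bit is what every runtime check keys on:

    /* sketch: guest-mode tracking after this change */
    env->hflags |= HF_SVMI_MASK;     /* vmrun: runtime intercept checks active */
    env->hflags &= ~HF_SVMI_MASK;    /* vmexit: back to the host, checks short-circuit */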
4939 4958

  
......
4942 4961

  
4943 4962
void helper_vmmcall(void)
4944 4963
{
4945
    if (loglevel & CPU_LOG_TB_IN_ASM)
4946
        fprintf(logfile,"vmmcall!\n");
4964
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
4965
    raise_exception(EXCP06_ILLOP);
4947 4966
}
4948 4967

  
4949 4968
void helper_vmload(void)
4950 4969
{
4951 4970
    target_ulong addr;
4971
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
4972

  
4973
    /* XXX: invalid in 32 bit */
4952 4974
    addr = EAX;
4953 4975
    if (loglevel & CPU_LOG_TB_IN_ASM)
4954 4976
        fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
4955 4977
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
4956 4978
                env->segs[R_FS].base);
4957 4979

  
4958
    SVM_LOAD_SEG2(addr, segs[R_FS], fs);
4959
    SVM_LOAD_SEG2(addr, segs[R_GS], gs);
4960
    SVM_LOAD_SEG2(addr, tr, tr);
4961
    SVM_LOAD_SEG2(addr, ldt, ldtr);
4980
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
4981
                       env, R_FS);
4982
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
4983
                       env, R_GS);
4984
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
4985
                 &env->tr);
4986
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
4987
                 &env->ldt);
4962 4988

  
4963 4989
#ifdef TARGET_X86_64
4964 4990
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
......
4975 5001
void helper_vmsave(void)
4976 5002
{
4977 5003
    target_ulong addr;
5004
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
4978 5005
    addr = EAX;
4979 5006
    if (loglevel & CPU_LOG_TB_IN_ASM)
4980 5007
        fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
4981 5008
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
4982 5009
                env->segs[R_FS].base);
4983 5010

  
4984
    SVM_SAVE_SEG(addr, segs[R_FS], fs);
4985
    SVM_SAVE_SEG(addr, segs[R_GS], gs);
4986
    SVM_SAVE_SEG(addr, tr, tr);
4987
    SVM_SAVE_SEG(addr, ldt, ldtr);
5011
    svm_save_seg(addr + offsetof(struct vmcb, save.fs), 
5012
                 &env->segs[R_FS]);
5013
    svm_save_seg(addr + offsetof(struct vmcb, save.gs), 
5014
                 &env->segs[R_GS]);
5015
    svm_save_seg(addr + offsetof(struct vmcb, save.tr), 
5016
                 &env->tr);
5017
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr), 
5018
                 &env->ldt);
4988 5019

  
4989 5020
#ifdef TARGET_X86_64
4990 5021
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
......
4998 5029
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
4999 5030
}
5000 5031

  
5032
void helper_stgi(void)
5033
{
5034
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
5035
    env->hflags |= HF_GIF_MASK;
5036
}
5037

  
5038
void helper_clgi(void)
5039
{
5040
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
5041
    env->hflags &= ~HF_GIF_MASK;
5042
}
5043

  
5001 5044
void helper_skinit(void)
5002 5045
{
5046
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
5047
    /* XXX: not implemented */
5003 5048
    if (loglevel & CPU_LOG_TB_IN_ASM)
5004 5049
        fprintf(logfile,"skinit!\n");
5050
    raise_exception(EXCP06_ILLOP);
5005 5051
}
5006 5052

  
5007 5053
void helper_invlpga(void)
5008 5054
{
5055
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
5009 5056
    tlb_flush(env, 0);
5010 5057
}
5011 5058

  
5012 5059
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
5013 5060
{
5061
    if (likely(!(env->hflags & HF_SVMI_MASK)))
5062
        return;
5014 5063
    switch(type) {
5015 5064
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
5016
        if (INTERCEPTEDw(_cr_read, (1 << (type - SVM_EXIT_READ_CR0)))) {
5065
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
5017 5066
            helper_vmexit(type, param);
5018 5067
        }
5019 5068
        break;
5020
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 8:
5021
        if (INTERCEPTEDw(_dr_read, (1 << (type - SVM_EXIT_READ_DR0)))) {
5069
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
5070
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
5022 5071
            helper_vmexit(type, param);
5023 5072
        }
5024 5073
        break;
5025
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
5026
        if (INTERCEPTEDw(_cr_write, (1 << (type - SVM_EXIT_WRITE_CR0)))) {
5074
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
5075
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
5027 5076
            helper_vmexit(type, param);
5028 5077
        }
5029 5078
        break;
5030
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 8:
5031
        if (INTERCEPTEDw(_dr_write, (1 << (type - SVM_EXIT_WRITE_DR0)))) {
5079
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
5080
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
5032 5081
            helper_vmexit(type, param);
5033 5082
        }
5034 5083
        break;
5035
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 16:
5036
        if (INTERCEPTEDl(_exceptions, (1 << (type - SVM_EXIT_EXCP_BASE)))) {
5084
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
5085
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
5037 5086
            helper_vmexit(type, param);
5038 5087
        }
5039 5088
        break;
5040
    case SVM_EXIT_IOIO:
5041
        break;
5042

  
5043 5089
    case SVM_EXIT_MSR:
5044
        if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT)) {
5090
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
5045 5091
            /* FIXME: this should be read in at vmrun (faster this way?) */
5046 5092
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
5047 5093
            uint32_t t0, t1;
......
5071 5117
        }
5072 5118
        break;
5073 5119
    default:
5074
        if (INTERCEPTED((1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR)))) {
5120
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
5075 5121
            helper_vmexit(type, param);
5076 5122
        }
5077 5123
        break;
......
5081 5127
void helper_svm_check_io(uint32_t port, uint32_t param, 
5082 5128
                         uint32_t next_eip_addend)
5083 5129
{
5084
    if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT)) {
5130
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
5085 5131
        /* FIXME: this should be read in at vmrun (faster this way?) */
5086 5132
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
5087 5133
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
......
5113 5159
    }
5114 5160

  
5115 5161
    /* Save the VM state in the vmcb */
5116
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_ES], es);
5117
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_CS], cs);
5118
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_SS], ss);
5119
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_DS], ds);
5162
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es), 
5163
                 &env->segs[R_ES]);
5164
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs), 
5165
                 &env->segs[R_CS]);
5166
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss), 
5167
                 &env->segs[R_SS]);
5168
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds), 
5169
                 &env->segs[R_DS]);
5120 5170

  
5121 5171
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
5122 5172
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
......
5146 5196

  
5147 5197
    /* Reload the host state from vm_hsave */
5148 5198
    env->hflags &= ~HF_HIF_MASK;
5199
    env->hflags &= ~HF_SVMI_MASK;
5149 5200
    env->intercept = 0;
5150 5201
    env->intercept_exceptions = 0;
5151 5202
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
......
5169 5220
    env->hflags &= ~HF_LMA_MASK;
5170 5221
    if (env->efer & MSR_EFER_LMA)
5171 5222
       env->hflags |= HF_LMA_MASK;
5223
    /* XXX: should also emulate the VM_CR MSR */
5224
    env->hflags &= ~HF_SVME_MASK;
5225
    if (env->cpuid_ext3_features & CPUID_EXT3_SVM) {
5226
        if (env->efer & MSR_EFER_SVME)
5227
            env->hflags |= HF_SVME_MASK;
5228
    } else {
5229
        env->efer &= ~MSR_EFER_SVME;
5230
    }
5172 5231
#endif
5173 5232

  
5174 5233
    env->eflags = 0;
......
5176 5235
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5177 5236
    CC_OP = CC_OP_EFLAGS;
5178 5237

  
5179
    SVM_LOAD_SEG(env->vm_hsave, ES, es);
5180
    SVM_LOAD_SEG(env->vm_hsave, CS, cs);
5181
    SVM_LOAD_SEG(env->vm_hsave, SS, ss);
5182
    SVM_LOAD_SEG(env->vm_hsave, DS, ds);
5238
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
5239
                       env, R_ES);
5240
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
5241
                       env, R_CS);
5242
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
5243
                       env, R_SS);
5244
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
5245
                       env, R_DS);
5183 5246

  
5184 5247
    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
5185 5248
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
b/target-i386/svm.h
1 1
#ifndef __SVM_H
2 2
#define __SVM_H
3 3

  
4
enum {
5
        /* We shift all the intercept bits so we can OR them with the
6
           TB flags later on */
7
	INTERCEPT_INTR = HF_HIF_SHIFT,
8
	INTERCEPT_NMI,
9
	INTERCEPT_SMI,
10
	INTERCEPT_INIT,
11
	INTERCEPT_VINTR,
12
	INTERCEPT_SELECTIVE_CR0,
13
	INTERCEPT_STORE_IDTR,
14
	INTERCEPT_STORE_GDTR,
15
	INTERCEPT_STORE_LDTR,
16
	INTERCEPT_STORE_TR,
17
	INTERCEPT_LOAD_IDTR,
18
	INTERCEPT_LOAD_GDTR,
19
	INTERCEPT_LOAD_LDTR,
20
	INTERCEPT_LOAD_TR,
21
	INTERCEPT_RDTSC,
22
	INTERCEPT_RDPMC,
23
	INTERCEPT_PUSHF,
24
	INTERCEPT_POPF,
25
	INTERCEPT_CPUID,
26
	INTERCEPT_RSM,
27
	INTERCEPT_IRET,
28
	INTERCEPT_INTn,
29
	INTERCEPT_INVD,
30
	INTERCEPT_PAUSE,
31
	INTERCEPT_HLT,
32
	INTERCEPT_INVLPG,
33
	INTERCEPT_INVLPGA,
34
	INTERCEPT_IOIO_PROT,
35
	INTERCEPT_MSR_PROT,
36
	INTERCEPT_TASK_SWITCH,
37
	INTERCEPT_FERR_FREEZE,
38
	INTERCEPT_SHUTDOWN,
39
	INTERCEPT_VMRUN,
40
	INTERCEPT_VMMCALL,
41
	INTERCEPT_VMLOAD,
42
	INTERCEPT_VMSAVE,
43
	INTERCEPT_STGI,
44
	INTERCEPT_CLGI,
45
	INTERCEPT_SKINIT,
46
	INTERCEPT_RDTSCP,
47
	INTERCEPT_ICEBP,
48
	INTERCEPT_WBINVD,
49
};
50
/* This is not really an intercept but rather a placeholder to
51
   show that we are in an SVM (just like a hidden flag, but keeps the
52
   TBs clean) */
53
#define INTERCEPT_SVM 63
54
#define INTERCEPT_SVM_MASK (1ULL << INTERCEPT_SVM)
55

  
56
struct __attribute__ ((__packed__)) vmcb_control_area {
57
	uint16_t intercept_cr_read;
58
	uint16_t intercept_cr_write;
59
	uint16_t intercept_dr_read;
60
	uint16_t intercept_dr_write;
61
	uint32_t intercept_exceptions;
62
	uint64_t intercept;
63
	uint8_t reserved_1[44];
64
	uint64_t iopm_base_pa;
65
	uint64_t msrpm_base_pa;
66
	uint64_t tsc_offset;
67
	uint32_t asid;
68
	uint8_t tlb_ctl;
69
	uint8_t reserved_2[3];
70
	uint32_t int_ctl;
71
	uint32_t int_vector;
72
	uint32_t int_state;
73
	uint8_t reserved_3[4];
74
	uint64_t exit_code;
75
	uint64_t exit_info_1;
76
	uint64_t exit_info_2;
77
	uint32_t exit_int_info;
78
	uint32_t exit_int_info_err;
79
	uint64_t nested_ctl;
80
	uint8_t reserved_4[16];
81
	uint32_t event_inj;
82
	uint32_t event_inj_err;
83
	uint64_t nested_cr3;
84
	uint64_t lbr_ctl;
85
	uint8_t reserved_5[832];
86
};
87

  
88

  
89 4
#define TLB_CONTROL_DO_NOTHING 0
90 5
#define TLB_CONTROL_FLUSH_ALL_ASID 1
91 6

  
......
116 31
#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT)
117 32
#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT)
118 33

  
119
struct __attribute__ ((__packed__)) vmcb_seg {
120
	uint16_t selector;
121
	uint16_t attrib;
122
	uint32_t limit;
123
	uint64_t base;
124
};
125

  
126
struct __attribute__ ((__packed__)) vmcb_save_area {
127
	struct vmcb_seg es;
128
	struct vmcb_seg cs;
129
	struct vmcb_seg ss;
130
	struct vmcb_seg ds;
131
	struct vmcb_seg fs;
132
	struct vmcb_seg gs;
133
	struct vmcb_seg gdtr;
134
	struct vmcb_seg ldtr;
135
	struct vmcb_seg idtr;
136
	struct vmcb_seg tr;
137
	uint8_t reserved_1[43];
138
	uint8_t cpl;
139
	uint8_t reserved_2[4];
140
	uint64_t efer;
141
	uint8_t reserved_3[112];
142
	uint64_t cr4;
143
	uint64_t cr3;
144
	uint64_t cr0;
145
	uint64_t dr7;
146
	uint64_t dr6;
147
	uint64_t rflags;
148
	uint64_t rip;
149
	uint8_t reserved_4[88];
150
	uint64_t rsp;
151
	uint8_t reserved_5[24];
152
	uint64_t rax;
153
	uint64_t star;
154
	uint64_t lstar;
155
	uint64_t cstar;
156
	uint64_t sfmask;
157
	uint64_t kernel_gs_base;
158
	uint64_t sysenter_cs;
159
	uint64_t sysenter_esp;
160
	uint64_t sysenter_eip;
161
	uint64_t cr2;
162
	/* qemu: cr8 added to reuse this as hsave */
163
	uint64_t cr8;
164
	uint8_t reserved_6[32 - 8]; /* originally 32 */
165
	uint64_t g_pat;
166
	uint64_t dbgctl;
167
	uint64_t br_from;
168
	uint64_t br_to;
169
	uint64_t last_excp_from;
170
	uint64_t last_excp_to;
171
};
172

  
173
struct __attribute__ ((__packed__)) vmcb {
174
	struct vmcb_control_area control;
175
	struct vmcb_save_area save;
176
};
177

  
178
#define SVM_CPUID_FEATURE_SHIFT 2
179
#define SVM_CPUID_FUNC 0x8000000a
180

  
181
#define MSR_EFER_SVME_MASK (1ULL << 12)
182

  
183
#define SVM_SELECTOR_S_SHIFT 4
184
#define SVM_SELECTOR_DPL_SHIFT 5
185
#define SVM_SELECTOR_P_SHIFT 7
186
#define SVM_SELECTOR_AVL_SHIFT 8
187
#define SVM_SELECTOR_L_SHIFT 9
188
#define SVM_SELECTOR_DB_SHIFT 10
189
#define SVM_SELECTOR_G_SHIFT 11
190

  
191
#define SVM_SELECTOR_TYPE_MASK (0xf)
192
#define SVM_SELECTOR_S_MASK (1 << SVM_SELECTOR_S_SHIFT)
193
#define SVM_SELECTOR_DPL_MASK (3 << SVM_SELECTOR_DPL_SHIFT)
194
#define SVM_SELECTOR_P_MASK (1 << SVM_SELECTOR_P_SHIFT)
195
#define SVM_SELECTOR_AVL_MASK (1 << SVM_SELECTOR_AVL_SHIFT)
196
#define SVM_SELECTOR_L_MASK (1 << SVM_SELECTOR_L_SHIFT)
197
#define SVM_SELECTOR_DB_MASK (1 << SVM_SELECTOR_DB_SHIFT)
198
#define SVM_SELECTOR_G_MASK (1 << SVM_SELECTOR_G_SHIFT)
199

  
200
#define SVM_SELECTOR_WRITE_MASK (1 << 1)
201
#define SVM_SELECTOR_READ_MASK SVM_SELECTOR_WRITE_MASK
202
#define SVM_SELECTOR_CODE_MASK (1 << 3)
203

  
204
#define INTERCEPT_CR0_MASK 1
205
#define INTERCEPT_CR3_MASK (1 << 3)
206
#define INTERCEPT_CR4_MASK (1 << 4)
207

  
208
#define INTERCEPT_DR0_MASK 1
209
#define INTERCEPT_DR1_MASK (1 << 1)
210
#define INTERCEPT_DR2_MASK (1 << 2)
211
#define INTERCEPT_DR3_MASK (1 << 3)
212
#define INTERCEPT_DR4_MASK (1 << 4)
213
#define INTERCEPT_DR5_MASK (1 << 5)
214
#define INTERCEPT_DR6_MASK (1 << 6)
215
#define INTERCEPT_DR7_MASK (1 << 7)
216

  
217 34
#define SVM_EVTINJ_VEC_MASK 0xff
218 35

  
219 36
#define SVM_EVTINJ_TYPE_SHIFT 8
......
313 130

  
314 131
#define SVM_CR0_SELECTIVE_MASK (1 << 3 | 1) /* TS and MP */
315 132

  
316
#define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda"
317
#define SVM_VMRUN  ".byte 0x0f, 0x01, 0xd8"
318
#define SVM_VMSAVE ".byte 0x0f, 0x01, 0xdb"
319
#define SVM_CLGI   ".byte 0x0f, 0x01, 0xdd"
320
#define SVM_STGI   ".byte 0x0f, 0x01, 0xdc"
321
#define SVM_INVLPGA ".byte 0x0f, 0x01, 0xdf"
322

  
323
/* function references */
324

  
325
#define INTERCEPTED(mask) (env->intercept & mask)
326
#define INTERCEPTEDw(var, mask) (env->intercept ## var & mask)
327
#define INTERCEPTEDl(var, mask) (env->intercept ## var & mask)
133
struct __attribute__ ((__packed__)) vmcb_control_area {
134
	uint16_t intercept_cr_read;
135
	uint16_t intercept_cr_write;
136
	uint16_t intercept_dr_read;
137
	uint16_t intercept_dr_write;
138
	uint32_t intercept_exceptions;
139
	uint64_t intercept;
140
	uint8_t reserved_1[44];
141
	uint64_t iopm_base_pa;
142
	uint64_t msrpm_base_pa;
143
	uint64_t tsc_offset;
144
	uint32_t asid;
145
	uint8_t tlb_ctl;
146
	uint8_t reserved_2[3];
147
	uint32_t int_ctl;
148
	uint32_t int_vector;
149
	uint32_t int_state;
150
	uint8_t reserved_3[4];
151
	uint64_t exit_code;
152
	uint64_t exit_info_1;
153
	uint64_t exit_info_2;
154
	uint32_t exit_int_info;
155
	uint32_t exit_int_info_err;
156
	uint64_t nested_ctl;
157
	uint8_t reserved_4[16];
158
	uint32_t event_inj;
159
	uint32_t event_inj_err;
160
	uint64_t nested_cr3;
161
	uint64_t lbr_ctl;
162
	uint8_t reserved_5[832];
163
};
328 164

  
329
#define SVM_LOAD_SEG(addr, seg_index, seg) \
330
    cpu_x86_load_seg_cache(env, \
331
                    R_##seg_index, \
332
                    lduw_phys(addr + offsetof(struct vmcb, save.seg.selector)),\
333
                    ldq_phys(addr + offsetof(struct vmcb, save.seg.base)),\
334
                    ldl_phys(addr + offsetof(struct vmcb, save.seg.limit)),\
335
                    vmcb2cpu_attrib(lduw_phys(addr + offsetof(struct vmcb, save.seg.attrib)), ldq_phys(addr + offsetof(struct vmcb, save.seg.base)), ldl_phys(addr + offsetof(struct vmcb, save.seg.limit))))
165
struct __attribute__ ((__packed__)) vmcb_seg {
166
	uint16_t selector;
167
	uint16_t attrib;
168
	uint32_t limit;
169
	uint64_t base;
170
};
336 171

  
337
#define SVM_LOAD_SEG2(addr, seg_qemu, seg_vmcb) \
338
    env->seg_qemu.selector  = lduw_phys(addr + offsetof(struct vmcb, save.seg_vmcb.selector)); \
339
    env->seg_qemu.base      = ldq_phys(addr + offsetof(struct vmcb, save.seg_vmcb.base)); \
340
    env->seg_qemu.limit     = ldl_phys(addr + offsetof(struct vmcb, save.seg_vmcb.limit)); \
341
    env->seg_qemu.flags     = vmcb2cpu_attrib(lduw_phys(addr + offsetof(struct vmcb, save.seg_vmcb.attrib)), env->seg_qemu.base, env->seg_qemu.limit)
172
struct __attribute__ ((__packed__)) vmcb_save_area {
173
	struct vmcb_seg es;
174
	struct vmcb_seg cs;
175
	struct vmcb_seg ss;
176
	struct vmcb_seg ds;
177
	struct vmcb_seg fs;
178
	struct vmcb_seg gs;
179
	struct vmcb_seg gdtr;
180
	struct vmcb_seg ldtr;
181
	struct vmcb_seg idtr;
182
	struct vmcb_seg tr;
183
	uint8_t reserved_1[43];
184
	uint8_t cpl;
185
	uint8_t reserved_2[4];
186
	uint64_t efer;
187
	uint8_t reserved_3[112];
188
	uint64_t cr4;
189
	uint64_t cr3;
190
	uint64_t cr0;
191
	uint64_t dr7;
192
	uint64_t dr6;
193
	uint64_t rflags;
194
	uint64_t rip;
195
	uint8_t reserved_4[88];
196
	uint64_t rsp;
197
	uint8_t reserved_5[24];
198
	uint64_t rax;
199
	uint64_t star;
200
	uint64_t lstar;
201
	uint64_t cstar;
202
	uint64_t sfmask;
203
	uint64_t kernel_gs_base;
204
	uint64_t sysenter_cs;
205
	uint64_t sysenter_esp;
206
	uint64_t sysenter_eip;
207
	uint64_t cr2;
208
	/* qemu: cr8 added to reuse this as hsave */
209
	uint64_t cr8;
210
	uint8_t reserved_6[32 - 8]; /* originally 32 */
211
	uint64_t g_pat;
212
	uint64_t dbgctl;
213
	uint64_t br_from;
214
	uint64_t br_to;
215
	uint64_t last_excp_from;
216
	uint64_t last_excp_to;
217
};
342 218

  
343
#define SVM_SAVE_SEG(addr, seg_qemu, seg_vmcb) \
344
    stw_phys(addr + offsetof(struct vmcb, save.seg_vmcb.selector), env->seg_qemu.selector); \
345
    stq_phys(addr + offsetof(struct vmcb, save.seg_vmcb.base), env->seg_qemu.base); \
346
    stl_phys(addr + offsetof(struct vmcb, save.seg_vmcb.limit), env->seg_qemu.limit); \
347
    stw_phys(addr + offsetof(struct vmcb, save.seg_vmcb.attrib), cpu2vmcb_attrib(env->seg_qemu.flags))
219
struct __attribute__ ((__packed__)) vmcb {
220
	struct vmcb_control_area control;
221
	struct vmcb_save_area save;
222
};
348 223

  
349 224
#endif
b/target-i386/translate.c
733 733
        tcg_gen_helper_0_1(gen_check_io_func[ot],
734 734
                           cpu_tmp2_i32);
735 735
    }
736
    if(s->flags & (1ULL << INTERCEPT_IOIO_PROT)) {
736
    if(s->flags & HF_SVMI_MASK) {
737 737
        if (!state_saved) {
738 738
            if (s->cc_op != CC_OP_DYNAMIC)
739 739
                gen_op_set_cc_op(s->cc_op);
......
2322 2322
    return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2323 2323
}
2324 2324

  
2325
static inline int
2325
static inline void
2326 2326
gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
2327 2327
                              uint32_t type, uint64_t param)
2328 2328
{
2329
    if(!(s->flags & (INTERCEPT_SVM_MASK)))
2330
	/* no SVM activated */
2331
        return 0;
2332
    switch(type) {
2333
        /* CRx and DRx reads/writes */
2334
        case SVM_EXIT_READ_CR0 ... SVM_EXIT_EXCP_BASE - 1:
2335
            if (s->cc_op != CC_OP_DYNAMIC) {
2336
                gen_op_set_cc_op(s->cc_op);
2337
            }
2338
            gen_jmp_im(pc_start - s->cs_base);
2339
            tcg_gen_helper_0_2(helper_svm_check_intercept_param, 
2340
                               tcg_const_i32(type), tcg_const_i64(param));
2341
            /* this is a special case as we do not know if the interception occurs
2342
               so we assume there was none */
2343
            return 0;
2344
        case SVM_EXIT_MSR:
2345
            if(s->flags & (1ULL << INTERCEPT_MSR_PROT)) {
2346
                if (s->cc_op != CC_OP_DYNAMIC) {
2347
                    gen_op_set_cc_op(s->cc_op);
2348
                }
2349
                gen_jmp_im(pc_start - s->cs_base);
2350
                tcg_gen_helper_0_2(helper_svm_check_intercept_param,
2351
                                   tcg_const_i32(type), tcg_const_i64(param));
2352
                /* this is a special case as we do not know if the interception occurs
2353
                   so we assume there was none */
2354
                return 0;
2355
            }
2356
            break;
2357
        default:
2358
            if(s->flags & (1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR))) {
2359
                if (s->cc_op != CC_OP_DYNAMIC) {
2360
                    gen_op_set_cc_op(s->cc_op);
2361
                }
2362
                gen_jmp_im(pc_start - s->cs_base);
2363
                tcg_gen_helper_0_2(helper_vmexit,
2364
                                   tcg_const_i32(type), tcg_const_i64(param));
2365
                /* we can optimize this one so TBs don't get longer
2366
                   than up to vmexit */
2367
                gen_eob(s);
2368
                return 1;
2369
            }
2370
    }
2371
    return 0;
2329
    /* no SVM activated; fast case */
2330
    if (likely(!(s->flags & HF_SVMI_MASK)))
2331
        return;
2332
    if (s->cc_op != CC_OP_DYNAMIC)
2333
        gen_op_set_cc_op(s->cc_op);
2334
    gen_jmp_im(pc_start - s->cs_base);
2335
    tcg_gen_helper_0_2(helper_svm_check_intercept_param, 
2336
                       tcg_const_i32(type), tcg_const_i64(param));
2372 2337
}
2373 2338

  
2374
static inline int
2339
static inline void
2375 2340
gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2376 2341
{
2377
    return gen_svm_check_intercept_param(s, pc_start, type, 0);
2342
    gen_svm_check_intercept_param(s, pc_start, type, 0);
2378 2343
}
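Both translator-side wrappers become void because there is no longer a translation-time decision to make: if HF_SVMI is clear the likely() early return emits nothing, and if it is set the translator unconditionally emits a call to helper_svm_check_intercept_param() and lets it decide at run time whether to #VMEXIT. Call sites therefore shrink to a single line, e.g.:

    /* sketch: decode-time intercept hook after this change */
    gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);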
2379 2344

  
2380 2345
static inline void gen_stack_update(DisasContext *s, int addend)
......
5743 5708
        val = 0;
5744 5709
        goto do_lret;
5745 5710
    case 0xcf: /* iret */
5746
        if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET))
5747
            break;
5711
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
5748 5712
        if (!s->pe) {
5749 5713
            /* real mode */
5750 5714
            tcg_gen_helper_0_1(helper_iret_real, tcg_const_i32(s->dflag));
......
5890 5854
        /************************/
5891 5855
        /* flags */
5892 5856
    case 0x9c: /* pushf */
5893
        if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF))
5894
            break;
5857
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
5895 5858
        if (s->vm86 && s->iopl != 3) {
5896 5859
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
5897 5860
        } else {
......
5902 5865
        }
5903 5866
        break;
5904 5867
    case 0x9d: /* popf */
5905
        if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF))
5906
            break;
5868
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
5907 5869
        if (s->vm86 && s->iopl != 3) {
5908 5870
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
5909 5871
        } else {
......
6187 6149
        }
6188 6150
        break;
6189 6151
    case 0xcc: /* int3 */
6190
        if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_SWINT))
6191
            break;
6192 6152
        gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
6193 6153
        break;
6194 6154
    case 0xcd: /* int N */
6195 6155
        val = ldub_code(s->pc++);
6196
        if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_SWINT))
6197
            break;
6198 6156
        if (s->vm86 && s->iopl != 3) {
6199 6157
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6200 6158
        } else {
......
6204 6162
    case 0xce: /* into */
6205 6163
        if (CODE64(s))
6206 6164
            goto illegal_op;
6207
        if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_SWINT))
6208
            break;
6209 6165
        if (s->cc_op != CC_OP_DYNAMIC)
6210 6166
            gen_op_set_cc_op(s->cc_op);
6211 6167
        gen_jmp_im(pc_start - s->cs_base);
6212 6168
        tcg_gen_helper_0_1(helper_into, tcg_const_i32(s->pc - pc_start));
6213 6169
        break;
6214 6170
    case 0xf1: /* icebp (undocumented, exits to external debugger) */
6215
        if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP))
6216
            break;
6171
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
6217 6172
#if 1
6218 6173
        gen_debug(s, pc_start - s->cs_base);
6219 6174
#else
......
6371 6326
        if (s->cpl != 0) {
6372 6327
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6373 6328
        } else {
6374
            int retval = 0;
6329
            if (s->cc_op != CC_OP_DYNAMIC)
6330
                gen_op_set_cc_op(s->cc_op);
6331
            gen_jmp_im(pc_start - s->cs_base);
6375 6332
            if (b & 2) {
6376
                retval = gen_svm_check_intercept_param(s, pc_start, SVM_EXIT_MSR, 0);
6377 6333
                tcg_gen_helper_0_0(helper_rdmsr);
6378 6334
            } else {
6379
                retval = gen_svm_check_intercept_param(s, pc_start, SVM_EXIT_MSR, 1);
6380 6335
                tcg_gen_helper_0_0(helper_wrmsr);
6381 6336
            }
6382
            if(retval)
6383
                gen_eob(s);
6384 6337
        }
6385 6338
        break;
6386 6339
    case 0x131: /* rdtsc */
6387
        if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_RDTSC))
6388
            break;
6340
        if (s->cc_op != CC_OP_DYNAMIC)
6341
            gen_op_set_cc_op(s->cc_op);
6389 6342
        gen_jmp_im(pc_start - s->cs_base);
6390 6343
        tcg_gen_helper_0_0(helper_rdtsc);
6391 6344
        break;
6392 6345
    case 0x133: /* rdpmc */
6346
        if (s->cc_op != CC_OP_DYNAMIC)
6347
            gen_op_set_cc_op(s->cc_op);
6393 6348
        gen_jmp_im(pc_start - s->cs_base);
6394 6349
        tcg_gen_helper_0_0(helper_rdpmc);
6395 6350
        break;
......
6452 6407
        break;
6453 6408
#endif
6454 6409
    case 0x1a2: /* cpuid */
6455
        if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_CPUID))
6456
            break;
6457 6410
        tcg_gen_helper_0_0(helper_cpuid);
6458 6411
        break;
6459 6412
    case 0xf4: /* hlt */
6460 6413
        if (s->cpl != 0) {
6461 6414
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6462 6415
        } else {
6463
            if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_HLT))
6464
                break;
6465 6416
            if (s->cc_op != CC_OP_DYNAMIC)
6466 6417
                gen_op_set_cc_op(s->cc_op);
6467 6418
            gen_jmp_im(s->pc - s->cs_base);
......
6477 6428
        case 0: /* sldt */
6478 6429
            if (!s->pe || s->vm86)
6479 6430
                goto illegal_op;
6480
            if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ))
6481
                break;
6431
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
6482 6432
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
6483 6433
            ot = OT_WORD;
6484 6434
            if (mod == 3)
......
6491 6441
            if (s->cpl != 0) {
6492 6442
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6493 6443
            } else {
6494
                if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE))
6495
                    break;
6444
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
6496 6445
                gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
6497 6446
                gen_jmp_im(pc_start - s->cs_base);
6498 6447
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
......
6502 6451
        case 1: /* str */
6503 6452
            if (!s->pe || s->vm86)
6504 6453
                goto illegal_op;
6505
            if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ))
6506
                break;
6454
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
6507 6455
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
6508 6456
            ot = OT_WORD;
6509 6457
            if (mod == 3)
......
6516 6464
            if (s->cpl != 0) {
6517 6465
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6518 6466
            } else {
6519
                if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE))
6520
                    break;
6467
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
6521 6468
                gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
6522 6469
                gen_jmp_im(pc_start - s->cs_base);
6523 6470
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
......
6550 6497
        case 0: /* sgdt */
6551 6498
            if (mod == 3)
6552 6499
                goto illegal_op;
6553
            if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ))
6554
                break;
6500
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
6555 6501
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6556 6502
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
6557 6503
            gen_op_st_T0_A0(OT_WORD + s->mem_index);
......
6568 6514
                    if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
6569 6515
                        s->cpl != 0)
6570 6516
                        goto illegal_op;
6571
                    if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_MONITOR))
6572
                        break;
6573 6517
                    gen_jmp_im(pc_start - s->cs_base);
6574 6518
#ifdef TARGET_X86_64
6575 6519
                    if (s->aflag == 2) {
......
6592 6536
                        gen_op_set_cc_op(s->cc_op);
6593 6537
                        s->cc_op = CC_OP_DYNAMIC;
6594 6538
                    }
6595
                    if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_MWAIT))
6596
                        break;
6597 6539
                    gen_jmp_im(s->pc - s->cs_base);
6598 6540
                    tcg_gen_helper_0_0(helper_mwait);
6599 6541
                    gen_eob(s);
......
6602 6544
                    goto illegal_op;
6603 6545
                }
6604 6546
            } else { /* sidt */
6605
                if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ))
6606
                    break;
6547
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
6607 6548
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6608 6549
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
6609 6550
                gen_op_st_T0_A0(OT_WORD + s->mem_index);
......
6617 6558
        case 2: /* lgdt */
6618 6559
        case 3: /* lidt */
6619 6560
            if (mod == 3) {
6561
                if (s->cc_op != CC_OP_DYNAMIC)
6562
                    gen_op_set_cc_op(s->cc_op);
6563
                gen_jmp_im(pc_start - s->cs_base);
6620 6564
                switch(rm) {
6621 6565
                case 0: /* VMRUN */
6622
                    if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_VMRUN))
6566
                    if (!(s->flags & HF_SVME_MASK) || !s->pe)
6567
                        goto illegal_op;
6568
                    if (s->cpl != 0) {
6569
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6623 6570
                        break;
6624
                    if (s->cc_op != CC_OP_DYNAMIC)
6625
                        gen_op_set_cc_op(s->cc_op);
6626
                    gen_jmp_im(s->pc - s->cs_base);
6627
                    tcg_gen_helper_0_0(helper_vmrun);
6628
                    s->cc_op = CC_OP_EFLAGS;
6629
                    gen_eob(s);
6571
                    } else {
6572
                        tcg_gen_helper_0_0(helper_vmrun);
6573
                        s->cc_op = CC_OP_EFLAGS;
6574
                        gen_eob(s);
6575
                    }
6630 6576
                    break;
6631 6577
                case 1: /* VMMCALL */
6632
                    if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_VMMCALL))
6633
                         break;
6634
                    /* FIXME: cause #UD if hflags & SVM */
6578
                    if (!(s->flags & HF_SVME_MASK))
6579
                        goto illegal_op;
6635 6580
                    tcg_gen_helper_0_0(helper_vmmcall);
6636 6581
                    break;
6637 6582
                case 2: /* VMLOAD */
6638
                    if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_VMLOAD))
6639
                         break;
6640
                    tcg_gen_helper_0_0(helper_vmload);
6583
                    if (!(s->flags & HF_SVME_MASK) || !s->pe)
6584
                        goto illegal_op;
6585
                    if (s->cpl != 0) {
6586
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6587
                        break;
6588
                    } else {
6589
                        tcg_gen_helper_0_0(helper_vmload);
6590
                    }
6641 6591
                    break;
6642 6592
                case 3: /* VMSAVE */
6643
                    if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_VMSAVE))
6644
                         break;
6645
                    tcg_gen_helper_0_0(helper_vmsave);
6593
                    if (!(s->flags & HF_SVME_MASK) || !s->pe)
6594
                        goto illegal_op;
6595
                    if (s->cpl != 0) {
6596
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6597
                        break;
6598
                    } else {
6599
                        tcg_gen_helper_0_0(helper_vmsave);
6600
                    }
6646 6601
                    break;
6647 6602
                case 4: /* STGI */
6648
                    if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_STGI))
6649
                         break;
6650
                    tcg_gen_helper_0_0(helper_stgi);
6603
                    if ((!(s->flags & HF_SVME_MASK) &&
6604
                         !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) || 
6605
                        !s->pe)
6606
                        goto illegal_op;
6607
                    if (s->cpl != 0) {
6608
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6609
                        break;
6610
                    } else {
6611
                        tcg_gen_helper_0_0(helper_stgi);
6612
                    }
6651 6613
                    break;
6652 6614
                case 5: /* CLGI */
6653
                    if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_CLGI))
6654
                         break;
6655
                    tcg_gen_helper_0_0(helper_clgi);
6615
                    if (!(s->flags & HF_SVME_MASK) || !s->pe)
6616
                        goto illegal_op;
6617
                    if (s->cpl != 0) {
6618
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6619
                        break;
6620
                    } else {
6621
                        tcg_gen_helper_0_0(helper_clgi);
6622
                    }
6656 6623
                    break;
6657 6624
                case 6: /* SKINIT */
6658
                    if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_SKINIT))
6659
                         break;
6625
                    if ((!(s->flags & HF_SVME_MASK) && 
6626
                         !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) || 
6627
                        !s->pe)
6628
                        goto illegal_op;
6660 6629
                    tcg_gen_helper_0_0(helper_skinit);
6661 6630
                    break;
6662 6631
                case 7: /* INVLPGA */
6663
                    if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_INVLPGA))
6664
                         break;
6665
                    tcg_gen_helper_0_0(helper_invlpga);
6632
                    if (!(s->flags & HF_SVME_MASK) || !s->pe)
6633
                        goto illegal_op;
6634
                    if (s->cpl != 0) {
6635
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6636
                        break;
6637
                    } else {
6638
                        tcg_gen_helper_0_0(helper_invlpga);
6639
                    }
6666 6640
                    break;
6667 6641
                default:
6668 6642
                    goto illegal_op;
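The decode paths for the SVM instructions no longer call gen_svm_check_intercept (interception happens inside the helpers); instead they gain the architectural guards: #UD when EFER.SVME is clear (HF_SVME, with STGI/SKINIT also accepted when CPUID advertises SKINIT) or outside protected mode, and #GP(0) when CPL != 0. The recurring guard pattern, condensed from the hunks above (VMMCALL only checks SVME):

    /* sketch: common guard for VMRUN/VMLOAD/VMSAVE/CLGI/INVLPGA */
    if (!(s->flags & HF_SVME_MASK) || !s->pe)
        goto illegal_op;                              /* #UD */
    if (s->cpl != 0) {
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        break;                                        /* #GP(0) */
    }
    tcg_gen_helper_0_0(helper_vmload);                /* then call the helper */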
......
6670 6644
            } else if (s->cpl != 0) {
6671 6645
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6672 6646
            } else {
6673
                if (gen_svm_check_intercept(s, pc_start,
6674
                                            op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE))
6675
                    break;
6647
                gen_svm_check_intercept(s, pc_start,
6648
                                        op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
6676 6649
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6677 6650
                gen_op_ld_T1_A0(OT_WORD + s->mem_index);
6678 6651
                gen_add_A0_im(s, 2);
......
6689 6662
            }
6690 6663
            break;
6691 6664
        case 4: /* smsw */
6692
            if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0))
6693
                break;
6665
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
6694 6666
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
6695 6667
            gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 1);
6696 6668
            break;
......
6698 6670
            if (s->cpl != 0) {
6699 6671
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6700 6672
            } else {
6701
                if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0))
6702
                    break;
6673
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
6703 6674
                gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
6704 6675
                tcg_gen_helper_0_1(helper_lmsw, cpu_T[0]);
6705 6676
                gen_jmp_im(s->pc - s->cs_base);
......
6724 6695
                        goto illegal_op;
6725 6696
                    }
6726 6697
                } else {
6727
                    if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_INVLPG))
6728
                        break;
6729 6698
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6730 6699
                    tcg_gen_helper_0_1(helper_invlpg, cpu_A0);
6731 6700
                    gen_jmp_im(s->pc - s->cs_base);
......
6742 6711
        if (s->cpl != 0) {
6743 6712
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6744 6713
        } else {
6745
            if (gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD))
6746
                break;
6714
            gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
6747 6715
            /* nothing to do */
6748 6716
        }
6749 6717
        break;
......
6892 6860
            case 3:
6893 6861
            case 4:
6894 6862
            case 8:
6863
                if (s->cc_op != CC_OP_DYNAMIC)
6864
                    gen_op_set_cc_op(s->cc_op);
6865
                gen_jmp_im(pc_start - s->cs_base);
6895 6866
                if (b & 2) {
6896
                    gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0 + reg);
6897 6867
                    gen_op_mov_TN_reg(ot, 0, rm);
6898
                    tcg_gen_helper_0_2(helper_movl_crN_T0, 
... This diff was truncated because it exceeds the maximum size that can be displayed.
