Revision 0573fbfc

b/cpu-all.h
 #define CPU_INTERRUPT_HALT   0x20 /* CPU halt wanted */
 #define CPU_INTERRUPT_SMI    0x40 /* (x86 only) SMI interrupt pending */
 #define CPU_INTERRUPT_DEBUG  0x80 /* Debug event occurred.  */
+#define CPU_INTERRUPT_VIRQ   0x100 /* virtual interrupt pending.  */

 void cpu_interrupt(CPUState *s, int mask);
 void cpu_reset_interrupt(CPUState *env, int mask);
b/cpu-exec.c
 #if defined(TARGET_I386)
     flags = env->hflags;
     flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
+    flags |= env->intercept;
     cs_base = env->segs[R_CS].base;
     pc = cs_base + env->eip;
 #elif defined(TARGET_ARM)
......
                 tmp_T0 = T0;
 #endif
                 interrupt_request = env->interrupt_request;
-                if (__builtin_expect(interrupt_request, 0)) {
+                if (__builtin_expect(interrupt_request, 0)
+#if defined(TARGET_I386)
+                    && env->hflags & HF_GIF_MASK
+#endif
+                    ) {
                     if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                         env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                         env->exception_index = EXCP_DEBUG;
......
 #if defined(TARGET_I386)
                     if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                         !(env->hflags & HF_SMM_MASK)) {
+                        svm_check_intercept(SVM_EXIT_SMI);
                         env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                         do_smm_enter();
 #if defined(__sparc__) && !defined(HOST_SOLARIS)
......
                         T0 = 0;
 #endif
                     } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
-                        (env->eflags & IF_MASK) &&
+                        (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                         !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                         int intno;
+                        svm_check_intercept(SVM_EXIT_INTR);
                         env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                         intno = cpu_get_pic_interrupt(env);
                         if (loglevel & CPU_LOG_TB_IN_ASM) {
......
 #else
                         T0 = 0;
 #endif
+#if !defined(CONFIG_USER_ONLY)
+                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
+                        (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
+                         int intno;
+                         /* FIXME: this should respect TPR */
+                         env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+                         stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
+                                  ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
+                         intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
+                         if (loglevel & CPU_LOG_TB_IN_ASM)
+                             fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
+                         do_interrupt(intno, 0, 0, -1, 1);
+#if defined(__sparc__) && !defined(HOST_SOLARIS)
+                         tmp_T0 = 0;
+#else
+                         T0 = 0;
+#endif
+#endif
                     }
 #elif defined(TARGET_PPC)
 #if 0
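Note: taken together, the cpu-exec.c changes gate asynchronous events on three conditions: nothing at all is delivered while GIF is clear (CLGI), and a hard interrupt additionally needs either the guest's IF or, while a guest runs with V_INTR_MASKING, the host IF shadowed in HF_HIF_MASK. A compact restatement of that decision, not the committed code:

    /* Sketch only: when may a hard interrupt be delivered under SVM? */
    static int can_take_hard_irq(CPUX86State *env)
    {
        if (!(env->hflags & HF_GIF_MASK))        /* CLGI masked everything */
            return 0;
        if (env->hflags & HF_INHIBIT_IRQ_MASK)   /* sti/mov-ss shadow */
            return 0;
        /* in a guest with V_INTR_MASKING, HF_HIF_MASK carries the host IF */
        return (env->eflags & IF_MASK) || (env->hflags & HF_HIF_MASK);
    }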
b/exec.c
     vfprintf(stderr, fmt, ap);
     fprintf(stderr, "\n");
 #ifdef TARGET_I386
+    if(env->intercept & INTERCEPT_SVM_MASK) {
+        /* most probably the virtual machine should not
+           be shut down but rather caught by the VMM */
+        vmexit(SVM_EXIT_SHUTDOWN, 0);
+    }
     cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
 #else
     cpu_dump_state(env, stderr, fprintf, 0);
b/target-i386/cpu.h
 #define DESC_AVL_MASK   (1 << 20)
 #define DESC_P_MASK     (1 << 15)
 #define DESC_DPL_SHIFT  13
+#define DESC_DPL_MASK   (1 << DESC_DPL_SHIFT)
 #define DESC_S_MASK     (1 << 12)
 #define DESC_TYPE_SHIFT 8
 #define DESC_A_MASK     (1 << 8)
......
 #define HF_VM_SHIFT         17 /* must be same as eflags */
 #define HF_HALTED_SHIFT     18 /* CPU halted */
 #define HF_SMM_SHIFT        19 /* CPU in SMM mode */
+#define HF_GIF_SHIFT        20 /* if set CPU takes interrupts */
+#define HF_HIF_SHIFT        21 /* shadow copy of IF_MASK when in SVM */

 #define HF_CPL_MASK          (3 << HF_CPL_SHIFT)
 #define HF_SOFTMMU_MASK      (1 << HF_SOFTMMU_SHIFT)
......
 #define HF_OSFXSR_MASK       (1 << HF_OSFXSR_SHIFT)
 #define HF_HALTED_MASK       (1 << HF_HALTED_SHIFT)
 #define HF_SMM_MASK          (1 << HF_SMM_SHIFT)
+#define HF_GIF_MASK          (1 << HF_GIF_SHIFT)
+#define HF_HIF_MASK          (1 << HF_HIF_SHIFT)

 #define CR0_PE_MASK  (1 << 0)
 #define CR0_MP_MASK  (1 << 1)
......
 #define MSR_GSBASE                      0xc0000101
 #define MSR_KERNELGSBASE                0xc0000102

+#define MSR_VM_HSAVE_PA                 0xc0010117
+
 /* cpuid_features bits */
 #define CPUID_FP87 (1 << 0)
 #define CPUID_VME  (1 << 1)
......
 #define CPUID_EXT2_FFXSR   (1 << 25)
 #define CPUID_EXT2_LM      (1 << 29)

+#define CPUID_EXT3_SVM     (1 << 2)
+
 #define EXCP00_DIVZ    0
 #define EXCP01_SSTP    1
 #define EXCP02_NMI     2
......
     uint32_t sysenter_eip;
     uint64_t efer;
     uint64_t star;
+
+    target_phys_addr_t vm_hsave;
+    target_phys_addr_t vm_vmcb;
+    uint64_t intercept;
+    uint16_t intercept_cr_read;
+    uint16_t intercept_cr_write;
+    uint16_t intercept_dr_read;
+    uint16_t intercept_dr_write;
+    uint32_t intercept_exceptions;
+
 #ifdef TARGET_X86_64
     target_ulong lstar;
     target_ulong cstar;
......
     uint32_t cpuid_xlevel;
     uint32_t cpuid_model[12];
     uint32_t cpuid_ext2_features;
+    uint32_t cpuid_ext3_features;
     uint32_t cpuid_apic_id;

 #ifdef USE_KQEMU
......

 #include "cpu-all.h"

+#include "svm.h"
+
 #endif /* CPU_I386_H */
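Note: the new env->intercept* fields are consulted through helper macros from the svm.h this revision adds, which the truncated view does not show. Judging from the call sites (INTERCEPTEDl(_exceptions, ...), INTERCEPTEDw(_cr_read, ...), INTERCEPTED(1ULL << ...)), plausible definitions look like the following; this is inferred, not copied from the revision:

    /* Assumed shape of the svm.h interception predicates: */
    #define INTERCEPTED(mask)        (env->intercept & (mask))
    #define INTERCEPTEDw(var, mask)  (env->intercept ## var & (mask))  /* uint16_t maps */
    #define INTERCEPTEDl(var, mask)  (env->intercept ## var & (mask))  /* uint32_t maps */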
b/target-i386/exec.h
 void helper_hlt(void);
 void helper_monitor(void);
 void helper_mwait(void);
+void helper_vmrun(target_ulong addr);
+void helper_vmmcall(void);
+void helper_vmload(target_ulong addr);
+void helper_vmsave(target_ulong addr);
+void helper_stgi(void);
+void helper_clgi(void);
+void helper_skinit(void);
+void helper_invlpga(void);
+void vmexit(uint64_t exit_code, uint64_t exit_info_1);

 extern const uint8_t parity_table[256];
 extern const uint8_t rclw_table[32];
......
     }
     return EXCP_HALTED;
 }
+
b/target-i386/helper.c
     int has_error_code, new_stack, shift;
     uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
     uint32_t old_eip, sp_mask;
+    int svm_should_check = 1;

+    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
+        next_eip = EIP;
+        svm_should_check = 0;
+    }
+
+    if (svm_should_check
+        && (INTERCEPTEDl(_exceptions, 1 << intno)
+        && !is_int)) {
+        raise_interrupt(intno, is_int, error_code, 0);
+    }
     has_error_code = 0;
     if (!is_int && !is_hw) {
         switch(intno) {
......
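Note: the same prologue is pasted into all three do_interrupt variants (protected mode above, long mode and real mode below). Condensed into one hypothetical helper for readability; svm_should_check suppresses re-interception when delivery restarts with next_eip == -1, i.e. when the exception already went through raise_interrupt once:

    /* Hypothetical consolidation of the repeated guard; same logic. */
    static void svm_maybe_intercept_exception(int intno, int is_int,
                                              int error_code, target_ulong *next_eip)
    {
        int svm_should_check = 1;

        if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && *next_eip == -1) {
            *next_eip = EIP;        /* restarted delivery: do not loop */
            svm_should_check = 0;
        }
        if (svm_should_check && INTERCEPTEDl(_exceptions, 1 << intno) && !is_int)
            raise_interrupt(intno, is_int, error_code, 0);  /* may #VMEXIT */
    }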
     int has_error_code, new_stack;
     uint32_t e1, e2, e3, ss;
     target_ulong old_eip, esp, offset;
+    int svm_should_check = 1;

+    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
+        next_eip = EIP;
+        svm_should_check = 0;
+    }
+    if (svm_should_check
+        && INTERCEPTEDl(_exceptions, 1 << intno)
+        && !is_int) {
+        raise_interrupt(intno, is_int, error_code, 0);
+    }
     has_error_code = 0;
     if (!is_int && !is_hw) {
         switch(intno) {
......
     int selector;
     uint32_t offset, esp;
     uint32_t old_cs, old_eip;
+    int svm_should_check = 1;

+    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
+        next_eip = EIP;
+        svm_should_check = 0;
+    }
+    if (svm_should_check
+        && INTERCEPTEDl(_exceptions, 1 << intno)
+        && !is_int) {
+        raise_interrupt(intno, is_int, error_code, 0);
+    }
     /* real mode (simpler !) */
     dt = &env->idt;
     if (intno * 4 + 3 > dt->limit)
......
 void raise_interrupt(int intno, int is_int, int error_code,
                      int next_eip_addend)
 {
-    if (!is_int)
+    if (!is_int) {
+        svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
         intno = check_exception(intno, &error_code);
+    }

     env->exception_index = intno;
     env->error_code = error_code;
......
     case 0x80000001:
         EAX = env->cpuid_features;
         EBX = 0;
-        ECX = 0;
+        ECX = env->cpuid_ext3_features;
         EDX = env->cpuid_ext2_features;
         break;
     case 0x80000002:
......
     case MSR_PAT:
         env->pat = val;
         break;
+    case MSR_VM_HSAVE_PA:
+        env->vm_hsave = val;
+        break;
 #ifdef TARGET_X86_64
     case MSR_LSTAR:
         env->lstar = val;
......
     case MSR_PAT:
         val = env->pat;
         break;
+    case MSR_VM_HSAVE_PA:
+        val = env->vm_hsave;
+        break;
 #ifdef TARGET_X86_64
     case MSR_LSTAR:
         val = env->lstar;
......
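Note, for orientation (not part of this revision): a guest hypervisor is expected to program MSR_VM_HSAVE_PA once with the physical address of a host-save area before its first VMRUN; helper_vmrun below then spills host state to env->vm_hsave. A sketch of the guest-side sequence, where wrmsr() stands in for the usual MSR-write primitive and host_save_pa/vmcb_pa are hypothetical addresses:

    wrmsr(MSR_VM_HSAVE_PA, host_save_pa);  /* where VMRUN saves host state */
    /* ... build the VMCB at vmcb_pa ... */
    asm volatile("vmrun" : : "a"(vmcb_pa) : "memory");  /* rAX holds the VMCB PA */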
     }
     env = saved_env;
 }
+
+
+/* Secure Virtual Machine helpers */
+
+void helper_stgi(void)
+{
+    env->hflags |= HF_GIF_MASK;
+}
+
+void helper_clgi(void)
+{
+    env->hflags &= ~HF_GIF_MASK;
+}
+
+#if defined(CONFIG_USER_ONLY)
+
+void helper_vmrun(target_ulong addr) { }
+void helper_vmmcall(void) { }
+void helper_vmload(target_ulong addr) { }
+void helper_vmsave(target_ulong addr) { }
+void helper_skinit(void) { }
+void helper_invlpga(void) { }
+void vmexit(uint64_t exit_code, uint64_t exit_info_1) { }
+int svm_check_intercept_param(uint32_t type, uint64_t param)
+{
+    return 0;
+}
+
+#else
+
+static inline uint32_t
+vmcb2cpu_attrib(uint16_t vmcb_attrib, uint32_t vmcb_base, uint32_t vmcb_limit)
+{
+    return    ((vmcb_attrib & 0x00ff) << 8)          /* Type, S, DPL, P */
+            | ((vmcb_attrib & 0x0f00) << 12)         /* AVL, L, DB, G */
+            | ((vmcb_base >> 16) & 0xff)             /* Base 23-16 */
+            | (vmcb_base & 0xff000000)               /* Base 31-24 */
+            | (vmcb_limit & 0xf0000);                /* Limit 19-16 */
+}
+
+static inline uint16_t cpu2vmcb_attrib(uint32_t cpu_attrib)
+{
+    return    ((cpu_attrib >> 8) & 0xff)             /* Type, S, DPL, P */
+            | ((cpu_attrib & 0xf00000) >> 12);       /* AVL, L, DB, G */
+}
+
+extern uint8_t *phys_ram_base;
+void helper_vmrun(target_ulong addr)
+{
+    uint32_t event_inj;
+    uint32_t int_ctl;
+
+    if (loglevel & CPU_LOG_TB_IN_ASM)
+        fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
+
+    env->vm_vmcb = addr;
+    regs_to_env();
+
+    /* save the current CPU state in the hsave page */
+    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
+    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
+
+    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
+    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
+
+    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
+    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
+    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
+    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
+    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]);
+    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
+    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
+
+    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
+    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
+
+    SVM_SAVE_SEG(env->vm_hsave, segs[R_ES], es);
+    SVM_SAVE_SEG(env->vm_hsave, segs[R_CS], cs);
+    SVM_SAVE_SEG(env->vm_hsave, segs[R_SS], ss);
+    SVM_SAVE_SEG(env->vm_hsave, segs[R_DS], ds);
+
+    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
+    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
+    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
+
+    /* load the interception bitmaps so we do not need to access the
+       vmcb in svm mode */
+    /* We shift all the intercept bits so we can OR them with the TB
+       flags later on */
+    env->intercept            = (ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept)) << INTERCEPT_INTR) | INTERCEPT_SVM_MASK;
+    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
+    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
+    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
+    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
+    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
+
+    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
+    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
+
+    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
+    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
+
+    /* clear exit_info_2 so we behave like the real hardware */
+    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
+
+    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
+    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
+    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
+    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
+    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
+    if (int_ctl & V_INTR_MASKING_MASK) {
+        env->cr[8] = int_ctl & V_TPR_MASK;
+        if (env->eflags & IF_MASK)
+            env->hflags |= HF_HIF_MASK;
+    }
+
+#ifdef TARGET_X86_64
+    env->efer = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer));
+    env->hflags &= ~HF_LMA_MASK;
+    if (env->efer & MSR_EFER_LMA)
+       env->hflags |= HF_LMA_MASK;
+#endif
+    env->eflags = 0;
+    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
+                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
+    CC_OP = CC_OP_EFLAGS;
+    CC_DST = 0xffffffff;
+
+    SVM_LOAD_SEG(env->vm_vmcb, ES, es);
+    SVM_LOAD_SEG(env->vm_vmcb, CS, cs);
+    SVM_LOAD_SEG(env->vm_vmcb, SS, ss);
+    SVM_LOAD_SEG(env->vm_vmcb, DS, ds);
+
+    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
+    env->eip = EIP;
+    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
+    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
+    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
+    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
+    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
+
+    /* FIXME: guest state consistency checks */
+
+    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
+        case TLB_CONTROL_DO_NOTHING:
+            break;
+        case TLB_CONTROL_FLUSH_ALL_ASID:
+            /* FIXME: this is not 100% correct but should work for now */
+            tlb_flush(env, 1);
+        break;
+    }
+
+    helper_stgi();
+
+    regs_to_env();
+
+    /* maybe we need to inject an event */
+    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
+    if (event_inj & SVM_EVTINJ_VALID) {
+        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
+        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
+        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
+        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
+
+        if (loglevel & CPU_LOG_TB_IN_ASM)
+            fprintf(logfile, "Injecting(%#hx): ", valid_err);
+        /* FIXME: need to implement valid_err */
+        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
+        case SVM_EVTINJ_TYPE_INTR:
+                env->exception_index = vector;
+                env->error_code = event_inj_err;
+                env->exception_is_int = 1;
+                env->exception_next_eip = -1;
+                if (loglevel & CPU_LOG_TB_IN_ASM)
+                    fprintf(logfile, "INTR");
+                break;
+        case SVM_EVTINJ_TYPE_NMI:
+                env->exception_index = vector;
+                env->error_code = event_inj_err;
+                env->exception_is_int = 1;
+                env->exception_next_eip = EIP;
+                if (loglevel & CPU_LOG_TB_IN_ASM)
+                    fprintf(logfile, "NMI");
+                break;
+        case SVM_EVTINJ_TYPE_EXEPT:
+                env->exception_index = vector;
+                env->error_code = event_inj_err;
+                env->exception_is_int = 0;
+                env->exception_next_eip = -1;
+                if (loglevel & CPU_LOG_TB_IN_ASM)
+                    fprintf(logfile, "EXEPT");
+                break;
+        case SVM_EVTINJ_TYPE_SOFT:
+                env->exception_index = vector;
+                env->error_code = event_inj_err;
+                env->exception_is_int = 1;
+                env->exception_next_eip = EIP;
+                if (loglevel & CPU_LOG_TB_IN_ASM)
+                    fprintf(logfile, "SOFT");
+                break;
+        }
+        if (loglevel & CPU_LOG_TB_IN_ASM)
+            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
+    }
+    if (int_ctl & V_IRQ_MASK)
+        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
+
+    cpu_loop_exit();
+}
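Note: a worked decode of the EVENTINJ field handled at the end of helper_vmrun, with the layout per the AMD SVM architecture; the sample value is illustrative, not taken from this revision:

    /* event_inj = 0x8000030e (illustrative):
         bits  7:0  vector    = 0x0e -> page fault (#PF)
         bits 10:8  type      = 3    -> SVM_EVTINJ_TYPE_EXEPT
         bit  11    valid_err = 0    -> no event_inj_err consumed
         bit  31    valid     = 1    -> injection requested
       helper_vmrun would queue exception 14 with exception_is_int = 0
       and write the valid bit back to the VMCB cleared. */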
+
+void helper_vmmcall(void)
+{
+    if (loglevel & CPU_LOG_TB_IN_ASM)
+        fprintf(logfile,"vmmcall!\n");
+}
+
+void helper_vmload(target_ulong addr)
+{
+    if (loglevel & CPU_LOG_TB_IN_ASM)
+        fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
+                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
+                env->segs[R_FS].base);
+
+    SVM_LOAD_SEG2(addr, segs[R_FS], fs);
+    SVM_LOAD_SEG2(addr, segs[R_GS], gs);
+    SVM_LOAD_SEG2(addr, tr, tr);
+    SVM_LOAD_SEG2(addr, ldt, ldtr);
+
+#ifdef TARGET_X86_64
+    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
+    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
+    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
+    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
+#endif
+    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
+    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
+    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
+    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
+}
+
+void helper_vmsave(target_ulong addr)
+{
+    if (loglevel & CPU_LOG_TB_IN_ASM)
+        fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
+                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
+                env->segs[R_FS].base);
+
+    SVM_SAVE_SEG(addr, segs[R_FS], fs);
+    SVM_SAVE_SEG(addr, segs[R_GS], gs);
+    SVM_SAVE_SEG(addr, tr, tr);
+    SVM_SAVE_SEG(addr, ldt, ldtr);
+
+#ifdef TARGET_X86_64
+    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
+    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
+    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
+    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
+#endif
+    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
+    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
+    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
+    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
+}
+
+void helper_skinit(void)
+{
+    if (loglevel & CPU_LOG_TB_IN_ASM)
+        fprintf(logfile,"skinit!\n");
+}
+
+void helper_invlpga(void)
+{
+    tlb_flush(env, 0);
+}
+
+int svm_check_intercept_param(uint32_t type, uint64_t param)
+{
+    switch(type) {
+    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
+        if (INTERCEPTEDw(_cr_read, (1 << (type - SVM_EXIT_READ_CR0)))) {
+            vmexit(type, param);
+            return 1;
+        }
+        break;
+    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 8:
+        if (INTERCEPTEDw(_dr_read, (1 << (type - SVM_EXIT_READ_DR0)))) {
+            vmexit(type, param);
+            return 1;
+        }
+        break;
+    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
+        if (INTERCEPTEDw(_cr_write, (1 << (type - SVM_EXIT_WRITE_CR0)))) {
+            vmexit(type, param);
+            return 1;
+        }
+        break;
+    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 8:
+        if (INTERCEPTEDw(_dr_write, (1 << (type - SVM_EXIT_WRITE_DR0)))) {
+            vmexit(type, param);
+            return 1;
+        }
+        break;
+    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 16:
+        if (INTERCEPTEDl(_exceptions, (1 << (type - SVM_EXIT_EXCP_BASE)))) {
+            vmexit(type, param);
+            return 1;
+        }
+        break;
+    case SVM_EXIT_IOIO:
+        if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT)) {
+            /* FIXME: this should be read in at vmrun (faster this way?) */
+            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
+            uint16_t port = (uint16_t) (param >> 16);
+
+            if(ldub_phys(addr + port / 8) & (1 << (port % 8)))
+                vmexit(type, param);
+        }
+        break;
+
+    case SVM_EXIT_MSR:
+        if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT)) {
+            /* FIXME: this should be read in at vmrun (faster this way?) */
+            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
+            switch((uint32_t)ECX) {
+            case 0 ... 0x1fff:
+                T0 = (ECX * 2) % 8;
+                T1 = ECX / 8;
+                break;
+            case 0xc0000000 ... 0xc0001fff:
+                T0 = (8192 + ECX - 0xc0000000) * 2;
+                T1 = (T0 / 8);
+                T0 %= 8;
+                break;
+            case 0xc0010000 ... 0xc0011fff:
+                T0 = (16384 + ECX - 0xc0010000) * 2;
+                T1 = (T0 / 8);
+                T0 %= 8;
+                break;
+            default:
+                vmexit(type, param);
+                return 1;
+            }
+            if (ldub_phys(addr + T1) & ((1 << param) << T0))
+                vmexit(type, param);
+            return 1;
+        }
+        break;
+    default:
+        if (INTERCEPTED((1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR)))) {
+            vmexit(type, param);
+            return 1;
+        }
+        break;
+    }
+    return 0;
+}
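Note: the MSR permission map uses two bits per MSR (read, then write) across three 2 KB regions. An equivalent standalone restatement of the lookup above, following the AMD SVM spec's bit addressing; msrpm_is_intercepted is a hypothetical name, not in this revision:

    static int msrpm_is_intercepted(uint64_t msrpm_base, uint32_t msr, int is_write)
    {
        uint32_t bitpos;

        if (msr <= 0x1fff)                        /* 0000_0000h..0000_1fffh */
            bitpos = msr * 2;
        else if (msr - 0xc0000000 <= 0x1fff)      /* c000_0000h..c000_1fffh */
            bitpos = (8192 + msr - 0xc0000000) * 2;
        else if (msr - 0xc0010000 <= 0x1fff)      /* c001_0000h..c001_1fffh */
            bitpos = (16384 + msr - 0xc0010000) * 2;
        else
            return 1;                             /* outside the map: always exit */
        bitpos += is_write;                       /* even bit: RDMSR, odd bit: WRMSR */
        return (ldub_phys(msrpm_base + bitpos / 8) >> (bitpos % 8)) & 1;
    }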
+
+void vmexit(uint64_t exit_code, uint64_t exit_info_1)
+{
+    uint32_t int_ctl;
+
+    if (loglevel & CPU_LOG_TB_IN_ASM)
+        fprintf(logfile,"vmexit(%016" PRIx64 ", %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
+                exit_code, exit_info_1,
+                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
+                EIP);
+
+    /* Save the VM state in the vmcb */
+    SVM_SAVE_SEG(env->vm_vmcb, segs[R_ES], es);
+    SVM_SAVE_SEG(env->vm_vmcb, segs[R_CS], cs);
+    SVM_SAVE_SEG(env->vm_vmcb, segs[R_SS], ss);
+    SVM_SAVE_SEG(env->vm_vmcb, segs[R_DS], ds);
+
+    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
+    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
+
+    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
+    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
+
+    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
+    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
+    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
+    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
+    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
+
+    if ((int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl))) & V_INTR_MASKING_MASK) {
+        int_ctl &= ~V_TPR_MASK;
+        int_ctl |= env->cr[8] & V_TPR_MASK;
+        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
+    }
+
+    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
+    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
+    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
+    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
+    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
+    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
+    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
+
+    /* Reload the host state from vm_hsave */
+    env->hflags &= ~HF_HIF_MASK;
+    env->intercept = 0;
+    env->intercept_exceptions = 0;
+    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+
+    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
+    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
+
+    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
+    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
+
+    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
+    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
+    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
+    if (int_ctl & V_INTR_MASKING_MASK)
+        env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8));
+    /* we need to set the efer after the crs so the hidden flags get set properly */
+#ifdef TARGET_X86_64
+    env->efer  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer));
+    env->hflags &= ~HF_LMA_MASK;
+    if (env->efer & MSR_EFER_LMA)
+       env->hflags |= HF_LMA_MASK;
+#endif
+
+    env->eflags = 0;
+    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
+                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
+    CC_OP = CC_OP_EFLAGS;
+
+    SVM_LOAD_SEG(env->vm_hsave, ES, es);
+    SVM_LOAD_SEG(env->vm_hsave, CS, cs);
+    SVM_LOAD_SEG(env->vm_hsave, SS, ss);
+    SVM_LOAD_SEG(env->vm_hsave, DS, ds);
+
+    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
+    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
+    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
+
+    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
+    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
+
+    /* other setups */
+    cpu_x86_set_cpl(env, 0);
+    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code_hi), (uint32_t)(exit_code >> 32));
+    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
+    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
+
+    helper_clgi();
+    /* FIXME: Resets the current ASID register to zero (host ASID). */
+
+    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
+
+    /* Clears the TSC_OFFSET inside the processor. */
+
+    /* If the host is in PAE mode, the processor reloads the host's PDPEs
+       from the page table indicated by the host's CR3. If the PDPEs contain
+       illegal state, the processor causes a shutdown. */
+
+    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
+    env->cr[0] |= CR0_PE_MASK;
+    env->eflags &= ~VM_MASK;
+
+    /* Disables all breakpoints in the host DR7 register. */
+
+    /* Checks the reloaded host state for consistency. */
+
+    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
+       host's code segment or non-canonical (in the case of long mode), a
+       #GP fault is delivered inside the host. */
+
+    /* remove any pending exception */
+    env->exception_index = -1;
+    env->error_code = 0;
+    env->old_exception = -1;
+
+    regs_to_env();
+    cpu_loop_exit();
+}
+
+#endif
b/target-i386/helper2.c

 #include "cpu.h"
 #include "exec-all.h"
+#include "svm.h"

 //#define DEBUG_MMU

......
                                CPUID_CX8 | CPUID_PGE | CPUID_CMOV |
                                CPUID_PAT);
         env->pat = 0x0007040600070406ULL;
+        env->cpuid_ext3_features = CPUID_EXT3_SVM;
         env->cpuid_ext_features = CPUID_EXT_SSE3;
         env->cpuid_features |= CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | CPUID_PAE | CPUID_SEP;
         env->cpuid_features |= CPUID_APIC;
-        env->cpuid_xlevel = 0x80000006;
+        env->cpuid_xlevel = 0x8000000e;
         {
             const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION;
             int c, len, i;
......
         /* currently not enabled for std i386 because not fully tested */
         env->cpuid_ext2_features = (env->cpuid_features & 0x0183F3FF);
         env->cpuid_ext2_features |= CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX;
-        env->cpuid_xlevel = 0x80000008;

         /* these features are needed for Win64 and aren't fully implemented */
         env->cpuid_features |= CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA;
......
 #ifdef CONFIG_SOFTMMU
     env->hflags |= HF_SOFTMMU_MASK;
 #endif
+    env->hflags |= HF_GIF_MASK;

     cpu_x86_update_cr0(env, 0x60000010);
     env->a20_mask = 0xffffffff;
......
  do_fault_protect:
     error_code = PG_ERROR_P_MASK;
  do_fault:
-    env->cr[2] = addr;
     error_code |= (is_write << PG_ERROR_W_BIT);
     if (is_user)
         error_code |= PG_ERROR_U_MASK;
......
         (env->efer & MSR_EFER_NXE) &&
         (env->cr[4] & CR4_PAE_MASK))
         error_code |= PG_ERROR_I_D_MASK;
+    if (INTERCEPTEDl(_exceptions, 1 << EXCP0E_PAGE)) {
+        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), addr);
+    } else {
+        env->cr[2] = addr;
+    }
     env->error_code = error_code;
     env->exception_index = EXCP0E_PAGE;
+    /* the VMM will handle this */
+    if (INTERCEPTEDl(_exceptions, 1 << EXCP0E_PAGE))
+        return 2;
     return 1;
 }

b/target-i386/op.c
 } UREG64;
 #endif

-#ifdef TARGET_X86_64
-
 #define PARAMQ1 \
 ({\
     UREG64 __p;\
......
     __p.q;\
 })

+#ifdef TARGET_X86_64
+
 void OPPROTO op_movq_T0_im64(void)
 {
     T0 = PARAMQ1;
......
     helper_ltr_T0();
 }

-/* CR registers access */
+/* CR registers access. */
 void OPPROTO op_movl_crN_T0(void)
 {
     helper_movl_crN_T0(PARAM1);
 }

+/* These pseudo-opcodes check for SVM intercepts. */
+void OPPROTO op_svm_check_intercept(void)
+{
+    A0 = PARAM1 & PARAM2;
+    svm_check_intercept(PARAMQ1);
+}
+
+void OPPROTO op_svm_check_intercept_param(void)
+{
+    A0 = PARAM1 & PARAM2;
+    svm_check_intercept_param(PARAMQ1, T1);
+}
+
+void OPPROTO op_svm_vmexit(void)
+{
+    A0 = PARAM1 & PARAM2;
+    vmexit(PARAMQ1, T1);
+}
+
+void OPPROTO op_geneflags(void)
+{
+    CC_SRC = cc_table[CC_OP].compute_all();
+}
+
+/* This pseudo-opcode checks for IO intercepts. */
+#if !defined(CONFIG_USER_ONLY)
+void OPPROTO op_svm_check_intercept_io(void)
+{
+    A0 = PARAM1 & PARAM2;
+    /* PARAMQ1 = TYPE (0 = OUT, 1 = IN; 4 = STRING; 8 = REP)
+       T0      = PORT
+       T1      = next eip */
+    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), T1);
+    /* ASIZE does not appear on real hw */
+    svm_check_intercept_param(SVM_EXIT_IOIO,
+                              (PARAMQ1 & ~SVM_IOIO_ASIZE_MASK) |
+                              ((T0 & 0xffff) << 16));
+}
+#endif
+
 #if !defined(CONFIG_USER_ONLY)
 void OPPROTO op_movtl_T0_cr8(void)
 {
......

 #define SHIFT 1
 #include "ops_sse.h"
+
+/* Secure Virtual Machine ops */
+
+void OPPROTO op_vmrun(void)
+{
+    helper_vmrun(EAX);
+}
+
+void OPPROTO op_vmmcall(void)
+{
+    helper_vmmcall();
+}
+
+void OPPROTO op_vmload(void)
+{
+    helper_vmload(EAX);
+}
+
+void OPPROTO op_vmsave(void)
+{
+    helper_vmsave(EAX);
+}
+
+void OPPROTO op_stgi(void)
+{
+    helper_stgi();
+}
+
+void OPPROTO op_clgi(void)
+{
+    helper_clgi();
+}
+
+void OPPROTO op_skinit(void)
+{
+    helper_skinit();
+}
+
+void OPPROTO op_invlpga(void)
+{
+    helper_invlpga();
+}
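Note on the dummy "A0 = PARAM1 & PARAM2" in the op_svm_* stubs: dyngen micro-ops only take 32-bit immediates, so a 64-bit SVM exit code is split across two PARAM slots and reassembled with PARAMQ1; the AND forces the code generator to materialize both halves. The PARAMQ1 body is elided in this view; under the assumption that UREG64 exposes 32-bit halves l.v0/l.v1 and a 64-bit q, the reassembly would look roughly like:

    /* Sketch only; PARAMQ1_SKETCH is a hypothetical name. */
    #define PARAMQ1_SKETCH \
    ({ \
        UREG64 __p; \
        __p.l.v1 = PARAM1;  /* high half, first immediate  */ \
        __p.l.v0 = PARAM2;  /* low half, second immediate  */ \
        __p.q;              /* reassembled 64-bit value    */ \
    })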
b/target-i386/translate.c
     }
 }

+#ifdef TARGET_X86_64
+#define SVM_movq_T1_im(x) gen_op_movq_T1_im64((x) >> 32, x)
+#else
+#define SVM_movq_T1_im(x) gen_op_movl_T1_im(x)
+#endif
+
+static inline int
+gen_svm_check_io(DisasContext *s, target_ulong pc_start, uint64_t type)
+{
+#if !defined(CONFIG_USER_ONLY)
+    if(s->flags & (1ULL << INTERCEPT_IOIO_PROT)) {
+        if (s->cc_op != CC_OP_DYNAMIC)
+            gen_op_set_cc_op(s->cc_op);
+        SVM_movq_T1_im(s->pc - s->cs_base);
+        gen_jmp_im(pc_start - s->cs_base);
+        gen_op_geneflags();
+        gen_op_svm_check_intercept_io((uint32_t)(type >> 32), (uint32_t)type);
+        s->cc_op = CC_OP_DYNAMIC;
+        /* FIXME: maybe we could move the io intercept vector to the TB as well
+                  so we know if this is an EOB or not ... let's assume it's not
+                  for now. */
+    }
+#endif
+    return 0;
+}
+
+static inline int svm_is_rep(int prefixes)
+{
+    return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
+}
+
+static inline int
+gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
+                              uint64_t type, uint64_t param)
+{
+    if(!(s->flags & (INTERCEPT_SVM_MASK)))
+        /* no SVM activated */
+        return 0;
+    switch(type) {
+        /* CRx and DRx reads/writes */
+        case SVM_EXIT_READ_CR0 ... SVM_EXIT_EXCP_BASE - 1:
+            if (s->cc_op != CC_OP_DYNAMIC) {
+                gen_op_set_cc_op(s->cc_op);
+                s->cc_op = CC_OP_DYNAMIC;
+            }
+            gen_jmp_im(pc_start - s->cs_base);
+            SVM_movq_T1_im(param);
+            gen_op_geneflags();
+            gen_op_svm_check_intercept_param((uint32_t)(type >> 32), (uint32_t)type);
+            /* this is a special case as we do not know if the interception occurs
+               so we assume there was none */
+            return 0;
+        case SVM_EXIT_MSR:
+            if(s->flags & (1ULL << INTERCEPT_MSR_PROT)) {
+                if (s->cc_op != CC_OP_DYNAMIC) {
+                    gen_op_set_cc_op(s->cc_op);
+                    s->cc_op = CC_OP_DYNAMIC;
+                }
+                gen_jmp_im(pc_start - s->cs_base);
+                SVM_movq_T1_im(param);
+                gen_op_geneflags();
+                gen_op_svm_check_intercept_param((uint32_t)(type >> 32), (uint32_t)type);
+                /* this is a special case as we do not know if the interception occurs
+                   so we assume there was none */
+                return 0;
+            }
+            break;
+        default:
+            if(s->flags & (1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR))) {
+                if (s->cc_op != CC_OP_DYNAMIC) {
+                    gen_op_set_cc_op(s->cc_op);
+                    s->cc_op = CC_OP_EFLAGS;
+                }
+                gen_jmp_im(pc_start - s->cs_base);
+                SVM_movq_T1_im(param);
+                gen_op_geneflags();
+                gen_op_svm_vmexit(type >> 32, type);
+                /* we can optimize this one so TBs don't get longer
+                   than up to vmexit */
+                gen_eob(s);
+                return 1;
+            }
+    }
+    return 0;
+}
+
+static inline int
+gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
+{
+    return gen_svm_check_intercept_param(s, pc_start, type, 0);
+}
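Note: the 64-bit "type" passed to gen_svm_check_io packs the SVM IOIO exit-info bits. A worked decode of one call from the INS handler below (bit meanings per the AMD spec; ot and aflag as used in translate.c):

    /* "rep insw" with ot == OT_WORD (1) and 32-bit addressing (aflag == 1):
         SVM_IOIO_TYPE_MASK   -> bit 0 set: IN (an OUT leaves it clear)
         (1 << (4 + ot))      -> 1 << 5: 16-bit operand size
         svm_is_rep(prefixes) -> 8: REP prefix present
         4                    -> string instruction
         (1 << (7 + aflag))   -> 1 << 8: 32-bit address size
       op_svm_check_intercept_io later ORs the port into bits 31:16. */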
+
 static inline void gen_stack_update(DisasContext *s, int addend)
 {
 #ifdef TARGET_X86_64
......
         else
             ot = dflag ? OT_LONG : OT_WORD;
         gen_check_io(s, ot, 1, pc_start - s->cs_base);
+        gen_op_mov_TN_reg[OT_WORD][0][R_EDX]();
+        gen_op_andl_T0_ffff();
+        if (gen_svm_check_io(s, pc_start,
+                             SVM_IOIO_TYPE_MASK | (1 << (4+ot)) |
+                             svm_is_rep(prefixes) | 4 | (1 << (7+s->aflag))))
+            break;
         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
             gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
         } else {
......
         else
             ot = dflag ? OT_LONG : OT_WORD;
         gen_check_io(s, ot, 1, pc_start - s->cs_base);
+        gen_op_mov_TN_reg[OT_WORD][0][R_EDX]();
+        gen_op_andl_T0_ffff();
+        if (gen_svm_check_io(s, pc_start,
+                             (1 << (4+ot)) | svm_is_rep(prefixes) |
+                             4 | (1 << (7+s->aflag))))
+            break;
         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
             gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
         } else {
......

         /************************/
         /* port I/O */
+
     case 0xe4:
     case 0xe5:
         if ((b & 1) == 0)
......
         val = ldub_code(s->pc++);
         gen_op_movl_T0_im(val);
         gen_check_io(s, ot, 0, pc_start - s->cs_base);
+        if (gen_svm_check_io(s, pc_start,
+                             SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) |
+                             (1 << (4+ot))))
+            break;
         gen_op_in[ot]();
         gen_op_mov_reg_T1[ot][R_EAX]();
         break;
......
         val = ldub_code(s->pc++);
         gen_op_movl_T0_im(val);
         gen_check_io(s, ot, 0, pc_start - s->cs_base);
+        if (gen_svm_check_io(s, pc_start, svm_is_rep(prefixes) |
+                             (1 << (4+ot))))
+            break;
         gen_op_mov_TN_reg[ot][1][R_EAX]();
         gen_op_out[ot]();
         break;
......
         gen_op_mov_TN_reg[OT_WORD][0][R_EDX]();
         gen_op_andl_T0_ffff();
         gen_check_io(s, ot, 0, pc_start - s->cs_base);
+        if (gen_svm_check_io(s, pc_start,
+                             SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) |
+                             (1 << (4+ot))))
+            break;
         gen_op_in[ot]();
         gen_op_mov_reg_T1[ot][R_EAX]();
         break;
......
         gen_op_mov_TN_reg[OT_WORD][0][R_EDX]();
         gen_op_andl_T0_ffff();
         gen_check_io(s, ot, 0, pc_start - s->cs_base);
+        if (gen_svm_check_io(s, pc_start,
+                             svm_is_rep(prefixes) | (1 << (4+ot))))
+            break;
         gen_op_mov_TN_reg[ot][1][R_EAX]();
         gen_op_out[ot]();
         break;
......
         val = 0;
         goto do_lret;
     case 0xcf: /* iret */
+        if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET))
+            break;
         if (!s->pe) {
             /* real mode */
             gen_op_iret_real(s->dflag);
......
         /************************/
         /* flags */
     case 0x9c: /* pushf */
+        if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF))
+            break;
         if (s->vm86 && s->iopl != 3) {
             gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
         } else {
......
         }
         break;
     case 0x9d: /* popf */
+        if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF))
+            break;
         if (s->vm86 && s->iopl != 3) {
             gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
         } else {
......
         /* XXX: correct lock test for all insn */
         if (prefixes & PREFIX_LOCK)
             goto illegal_op;
+        if (prefixes & PREFIX_REPZ) {
+            gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE);
+        }
         break;
     case 0x9b: /* fwait */
         if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
......
         }
         break;
     case 0xcc: /* int3 */
+        if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_SWINT))
+            break;
         gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
         break;
     case 0xcd: /* int N */
         val = ldub_code(s->pc++);
+        if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_SWINT))
+            break;
         if (s->vm86 && s->iopl != 3) {
             gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
         } else {
......
     case 0xce: /* into */
         if (CODE64(s))
             goto illegal_op;
+        if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_SWINT))
+            break;
         if (s->cc_op != CC_OP_DYNAMIC)
             gen_op_set_cc_op(s->cc_op);
         gen_jmp_im(pc_start - s->cs_base);
         gen_op_into(s->pc - pc_start);
         break;
     case 0xf1: /* icebp (undocumented, exits to external debugger) */
+        if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP))
+            break;
 #if 1
         gen_debug(s, pc_start - s->cs_base);
 #else
......
                     gen_op_set_inhibit_irq();
                 /* give a chance to handle pending irqs */
                 gen_jmp_im(s->pc - s->cs_base);
+                if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_VINTR))
+                    break;
                 gen_eob(s);
             } else {
                 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
......
         if (s->cpl != 0) {
             gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
         } else {
-            if (b & 2)
+            int retval = 0;
+            if (b & 2) {
+                retval = gen_svm_check_intercept_param(s, pc_start, SVM_EXIT_MSR, 0);
                 gen_op_rdmsr();
-            else
+            } else {
+                retval = gen_svm_check_intercept_param(s, pc_start, SVM_EXIT_MSR, 1);
                 gen_op_wrmsr();
+            }
+            if(retval)
+                gen_eob(s);
         }
         break;
     case 0x131: /* rdtsc */
+        if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_RDTSC))
+            break;
         gen_jmp_im(pc_start - s->cs_base);
         gen_op_rdtsc();
         break;
......
         break;
 #endif
     case 0x1a2: /* cpuid */
+        if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_CPUID))
+            break;
         gen_op_cpuid();
         break;
     case 0xf4: /* hlt */
         if (s->cpl != 0) {
             gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
         } else {
+            if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_HLT))
+                break;
             if (s->cc_op != CC_OP_DYNAMIC)
                 gen_op_set_cc_op(s->cc_op);
             gen_jmp_im(s->pc - s->cs_base);
......
         case 0: /* sldt */
             if (!s->pe || s->vm86)
                 goto illegal_op;
+            if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ))
+                break;
             gen_op_movl_T0_env(offsetof(CPUX86State,ldt.selector));
             ot = OT_WORD;
             if (mod == 3)
......
             if (s->cpl != 0) {
                 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
             } else {
+                if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE))
+                    break;
                 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
                 gen_jmp_im(pc_start - s->cs_base);
                 gen_op_lldt_T0();
......
         case 1: /* str */
             if (!s->pe || s->vm86)
                 goto illegal_op;
+            if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ))
+                break;
             gen_op_movl_T0_env(offsetof(CPUX86State,tr.selector));
             ot = OT_WORD;
             if (mod == 3)
......
             if (s->cpl != 0) {
                 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
             } else {
+                if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE))
+                    break;
                 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
                 gen_jmp_im(pc_start - s->cs_base);
                 gen_op_ltr_T0();
......
         case 0: /* sgdt */
             if (mod == 3)
                 goto illegal_op;
+            if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ))
+                break;
             gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
             gen_op_movl_T0_env(offsetof(CPUX86State, gdt.limit));
             gen_op_st_T0_A0[OT_WORD + s->mem_index]();
......
                     if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
                         s->cpl != 0)
                         goto illegal_op;
+                    if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_MONITOR))
+                        break;
                     gen_jmp_im(pc_start - s->cs_base);
 #ifdef TARGET_X86_64
                     if (s->aflag == 2) {
......
                         gen_op_set_cc_op(s->cc_op);
                         s->cc_op = CC_OP_DYNAMIC;
                     }
+                    if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_MWAIT))
+                        break;
                     gen_jmp_im(s->pc - s->cs_base);
                     gen_op_mwait();
                     gen_eob(s);
......
                     goto illegal_op;
                 }
             } else { /* sidt */
+                if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ))
+                    break;
                 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                 gen_op_movl_T0_env(offsetof(CPUX86State, idt.limit));
                 gen_op_st_T0_A0[OT_WORD + s->mem_index]();
......
             break;
         case 2: /* lgdt */
         case 3: /* lidt */
-            if (mod == 3)
-                goto illegal_op;
-            if (s->cpl != 0) {
+            if (mod == 3) {
+                switch(rm) {
+                case 0: /* VMRUN */
+                    if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_VMRUN))
+                        break;
+                    if (s->cc_op != CC_OP_DYNAMIC)
+                        gen_op_set_cc_op(s->cc_op);
+                    gen_jmp_im(s->pc - s->cs_base);
+                    gen_op_vmrun();
+                    s->cc_op = CC_OP_EFLAGS;
+                    gen_eob(s);
+                    break;
+                case 1: /* VMMCALL */
+                    if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_VMMCALL))
+                         break;
+                    /* FIXME: cause #UD if hflags & SVM */
+                    gen_op_vmmcall();
+                    break;
+                case 2: /* VMLOAD */
+                    if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_VMLOAD))
+                         break;
+                    gen_op_vmload();
+                    break;
+                case 3: /* VMSAVE */
+                    if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_VMSAVE))
+                         break;
+                    gen_op_vmsave();
+                    break;
+                case 4: /* STGI */
+                    if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_STGI))
+                         break;
+                    gen_op_stgi();
+                    break;
+                case 5: /* CLGI */
+                    if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_CLGI))
+                         break;
+                    gen_op_clgi();
+                    break;
+                case 6: /* SKINIT */
+                    if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_SKINIT))
+                         break;
+                    gen_op_skinit();
+                    break;
+                case 7: /* INVLPGA */
+                    if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_INVLPGA))
+                         break;
+                    gen_op_invlpga();
+                    break;
+                default:
+                    goto illegal_op;
+                }
+            } else if (s->cpl != 0) {
                 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
             } else {
+                if (gen_svm_check_intercept(s, pc_start,
+                                            op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE))
+                    break;
                 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                 gen_op_ld_T1_A0[OT_WORD + s->mem_index]();
                 gen_add_A0_im(s, 2);
......
             }
             break;
         case 4: /* smsw */
+            if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0))
+                break;
             gen_op_movl_T0_env(offsetof(CPUX86State,cr[0]));
             gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 1);
             break;
......
             if (s->cpl != 0) {
                 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
             } else {
+                if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0))
+                    break;
                 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
                 gen_op_lmsw_T0();
                 gen_jmp_im(s->pc - s->cs_base);
......
                         goto illegal_op;
                     }
                 } else {
+                    if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_INVLPG))
+                        break;
                     gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                     gen_op_invlpg_A0();
                     gen_jmp_im(s->pc - s->cs_base);
......
         if (s->cpl != 0) {
             gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
         } else {
+            if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_INVD))
+                break;
             /* nothing to do */
         }
         break;
......
             case 4:
             case 8:
                 if (b & 2) {
+                    gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0 + reg);
                     gen_op_mov_TN_reg[ot][0][rm]();
                     gen_op_movl_crN_T0(reg);
                     gen_jmp_im(s->pc - s->cs_base);
                     gen_eob(s);
                 } else {
+                    gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0 + reg);
 #if !defined(CONFIG_USER_ONLY)
                     if (reg == 8)
                         gen_op_movtl_T0_cr8();
......
             if (reg == 4 || reg == 5 || reg >= 8)
                 goto illegal_op;
             if (b & 2) {
+                gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
                 gen_op_mov_TN_reg[ot][0][rm]();
                 gen_op_movl_drN_T0(reg);
                 gen_jmp_im(s->pc - s->cs_base);
                 gen_eob(s);
             } else {
+                gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
                 gen_op_movtl_T0_env(offsetof(CPUX86State,dr[reg]));
                 gen_op_mov_reg_T0[ot][rm]();
             }
......
         if (s->cpl != 0) {
             gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
         } else {
+            gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
             gen_op_clts();
             /* abort block because static cpu state changed */
             gen_jmp_im(s->pc - s->cs_base);
......
         /* ignore for now */
         break;
     case 0x1aa: /* rsm */
+        if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM))
+            break;
         if (!(s->flags & HF_SMM_MASK))
             goto illegal_op;
... This diff was truncated because it exceeds the maximum size that can be displayed.
