Revision b8b6a50b

b/target-i386/TODO
3 3
- rework eflags optimization (will be a consequence of TCG port)
4 4
- SVM: rework the implementation: simplify code, move most intercept
5 5
  tests as dynamic, correct segment access, verify exception safety,
6
  remove most of the added CPU state.
6
  cpu save/restore, SMM save/restore. 
7 7
- arpl eflags computation is invalid
8 8
- x86_64: fxsave/fxrestore intel/amd differences
9 9
- x86_64: lcall/ljmp intel/amd differences ?
10 10
- x86_64: cmpxchgl intel/amd differences ?
11
- x86_64: cmovl bug intel/amd differences ?
11
- x86_64: cmovl intel/amd differences ?
12
- cmpxchg16b + cmpxchg8b cpuid test
12 13
- x86: monitor invalid 
13 14
- better code fetch (different exception handling + CS.limit support)
14 15
- user/kernel PUSHL/POPL in helper.c
......
19 20
- full support of segment limit/rights 
20 21
- full x87 exception support
21 22
- improve x87 bit exactness (use bochs code ?)
23
- DRx register support
24
- CR0.AC emulation
25
- SSE alignment checks
26
- fix SSE min/max with nans
22 27

  
23 28
Optimizations/Features:
24 29

  
25 30
- finish TCG port
31
- add SVM nested paging support
32
- add VMX support
33
- add AVX support
34
- add SSE5 support
26 35
- evaluate x87 stack pointer statically
27 36
- find a way to avoid translating several time the same TB if CR0.TS
28 37
  is set or not.
b/target-i386/exec.h
105 105

  
106 106
extern CCTable cc_table[];
107 107

  
108
void helper_load_seg(int seg_reg, int selector);
109
void helper_ljmp_protected_T0_T1(int next_eip);
110
void helper_lcall_real_T0_T1(int shift, int next_eip);
111
void helper_lcall_protected_T0_T1(int shift, int next_eip);
112
void helper_iret_real(int shift);
113
void helper_iret_protected(int shift, int next_eip);
114
void helper_lret_protected(int shift, int addend);
115
void helper_movl_crN_T0(int reg);
116
void helper_movl_drN_T0(int reg);
117
void helper_invlpg(target_ulong addr);
118 108
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
119 109
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
120 110
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
......
141 131

  
142 132
#include "helper.h"
143 133

  
144
void helper_mulq_EAX_T0(void);
145
void helper_imulq_EAX_T0(void);
146
void helper_imulq_T0_T1(void);
147
void helper_cmpxchg8b(void);
148

  
149
void check_iob_T0(void);
150
void check_iow_T0(void);
151
void check_iol_T0(void);
152
void check_iob_DX(void);
153
void check_iow_DX(void);
154
void check_iol_DX(void);
134
static inline void svm_check_intercept(uint32_t type)
135
{
136
    helper_svm_check_intercept_param(type, 0);
137
}
155 138

  
156 139
#if !defined(CONFIG_USER_ONLY)
157 140

  
......
363 346
void fpu_raise_exception(void);
364 347
void restore_native_fp_state(CPUState *env);
365 348
void save_native_fp_state(CPUState *env);
366
void vmexit(uint64_t exit_code, uint64_t exit_info_1);
367 349

  
368 350
extern const uint8_t parity_table[256];
369 351
extern const uint8_t rclw_table[32];
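
The exec.h hunks above drop the prototypes of helpers that used to communicate through the T0/T1/A0 global temporaries (helper_ljmp_protected_T0_T1, helper_movl_crN_T0, the check_io*_T0/_DX entry points, the svm_check_intercept wrapper, vmexit); the rest of the patch re-declares them in helper.h with explicit arguments so the translator can pass operands directly. A minimal sketch of that calling-convention change, using hypothetical names rather than the real QEMU state:

    #include <stdio.h>
    #include <stdint.h>

    /* Old style: micro-ops loaded values into global "registers" and the
     * helper read them implicitly.  T0/A0 here are stand-ins, not the real
     * QEMU globals. */
    static uint32_t T0;
    static uint32_t A0;

    static void helper_store_old(void)               /* hypothetical helper */
    {
        printf("old style: store %u at %#x\n", T0, A0);
    }

    /* New style after the conversion: the translator passes the operands
     * directly, so the helper has no hidden dependencies on globals. */
    static void helper_store_new(uint32_t val, uint32_t addr)  /* hypothetical */
    {
        printf("new style: store %u at %#x\n", val, addr);
    }

    int main(void)
    {
        T0 = 42; A0 = 0x1000;           /* what a micro-op used to do */
        helper_store_old();

        helper_store_new(42, 0x1000);   /* what tcg_gen_helper_* now emits */
        return 0;
    }

Passing operands as arguments removes the hidden dependency on the global temporaries, which is what lets the later translate.c hunks emit tcg_gen_helper_* calls with TCG values and constants.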
b/target-i386/helper.c
17 17
 * License along with this library; if not, write to the Free Software
18 18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19 19
 */
20
#define CPU_NO_GLOBAL_REGS
20 21
#include "exec.h"
21 22
#include "host-utils.h"
22 23

  
......
93 94
    3.32192809488736234781L,  /*l2t*/
94 95
};
95 96

  
96
/* thread support */
97
/* broken thread support */
97 98

  
98 99
spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
99 100

  
100
void cpu_lock(void)
101
void helper_lock(void)
101 102
{
102 103
    spin_lock(&global_cpu_lock);
103 104
}
104 105

  
105
void cpu_unlock(void)
106
void helper_unlock(void)
106 107
{
107 108
    spin_unlock(&global_cpu_lock);
108 109
}
......
508 509
    }
509 510
}
510 511

  
511
void check_iob_T0(void)
512
void helper_check_iob(uint32_t t0)
512 513
{
513
    check_io(T0, 1);
514
    check_io(t0, 1);
514 515
}
515 516

  
516
void check_iow_T0(void)
517
void helper_check_iow(uint32_t t0)
517 518
{
518
    check_io(T0, 2);
519
    check_io(t0, 2);
519 520
}
520 521

  
521
void check_iol_T0(void)
522
void helper_check_iol(uint32_t t0)
522 523
{
523
    check_io(T0, 4);
524
    check_io(t0, 4);
524 525
}
525 526

  
526
void check_iob_DX(void)
527
void helper_outb(uint32_t port, uint32_t data)
527 528
{
528
    check_io(EDX & 0xffff, 1);
529
    cpu_outb(env, port, data & 0xff);
529 530
}
530 531

  
531
void check_iow_DX(void)
532
target_ulong helper_inb(uint32_t port)
532 533
{
533
    check_io(EDX & 0xffff, 2);
534
    return cpu_inb(env, port);
534 535
}
535 536

  
536
void check_iol_DX(void)
537
void helper_outw(uint32_t port, uint32_t data)
537 538
{
538
    check_io(EDX & 0xffff, 4);
539
    cpu_outw(env, port, data & 0xffff);
540
}
541

  
542
target_ulong helper_inw(uint32_t port)
543
{
544
    return cpu_inw(env, port);
545
}
546

  
547
void helper_outl(uint32_t port, uint32_t data)
548
{
549
    cpu_outl(env, port, data);
550
}
551

  
552
target_ulong helper_inl(uint32_t port)
553
{
554
    return cpu_inl(env, port);
539 555
}
540 556

  
541 557
static inline unsigned int get_sp_mask(unsigned int e2)
......
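
In the hunk above the check_iob_T0/check_iow_T0/check_iol_T0 and *_DX micro-op entry points become helper_check_iob/iow/iol taking the port as a uint32_t argument, and the new helper_out*/helper_in* wrappers forward explicit port and data values to cpu_out*/cpu_in*, masking the data to the access width; the old EDX & 0xffff masking moves out of the helpers and into the translator. A rough model of the output side, with sketch_* stubs standing in for QEMU's cpu_out* callbacks and env left out:

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-ins for cpu_outb/cpu_outw/cpu_outl; the real ones also take env. */
    static void sketch_outb(uint32_t port, uint32_t data) { printf("outb %#x <- %#x\n", port, data); }
    static void sketch_outw(uint32_t port, uint32_t data) { printf("outw %#x <- %#x\n", port, data); }
    static void sketch_outl(uint32_t port, uint32_t data) { printf("outl %#x <- %#x\n", port, data); }

    /* Models helper_outb/outw/outl from the hunk: forward the explicit
       arguments, truncating the data to the access width. */
    static void model_outb(uint32_t port, uint32_t data) { sketch_outb(port, data & 0xff); }
    static void model_outw(uint32_t port, uint32_t data) { sketch_outw(port, data & 0xffff); }
    static void model_outl(uint32_t port, uint32_t data) { sketch_outl(port, data); }

    int main(void)
    {
        /* The port (EDX & 0xffff) is now computed by generated code, so the
           helper only ever sees a ready-made 16-bit port number. */
        model_outb(0x3f8, 0x12345641);   /* only 0x41 reaches the device   */
        model_outw(0x3f8, 0x12345641);   /* only 0x5641                    */
        model_outl(0x3f8, 0x12345641);
        return 0;
    }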
1275 1291
                     int next_eip_addend)
1276 1292
{
1277 1293
    if (!is_int) {
1278
        svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1294
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1279 1295
        intno = check_exception(intno, &error_code);
1280 1296
    }
1281 1297

  
......
1857 1873
    FORCE_RET();
1858 1874
}
1859 1875

  
1860
void helper_cmpxchg8b(void)
1876
void helper_cmpxchg8b(target_ulong a0)
1861 1877
{
1862 1878
    uint64_t d;
1863 1879
    int eflags;
1864 1880

  
1865 1881
    eflags = cc_table[CC_OP].compute_all();
1866
    d = ldq(A0);
1882
    d = ldq(a0);
1867 1883
    if (d == (((uint64_t)EDX << 32) | EAX)) {
1868
        stq(A0, ((uint64_t)ECX << 32) | EBX);
1884
        stq(a0, ((uint64_t)ECX << 32) | EBX);
1869 1885
        eflags |= CC_Z;
1870 1886
    } else {
1871
        EDX = d >> 32;
1872
        EAX = d;
1887
        EDX = (uint32_t)(d >> 32);
1888
        EAX = (uint32_t)d;
1873 1889
        eflags &= ~CC_Z;
1874 1890
    }
1875 1891
    CC_SRC = eflags;
......
1986 2002
    }
1987 2003
}
1988 2004

  
1989
void helper_enter_level(int level, int data32)
2005
void helper_enter_level(int level, int data32, target_ulong t1)
1990 2006
{
1991 2007
    target_ulong ssp;
1992 2008
    uint32_t esp_mask, esp, ebp;
......
2004 2020
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2005 2021
        }
2006 2022
        esp -= 4;
2007
        stl(ssp + (esp & esp_mask), T1);
2023
        stl(ssp + (esp & esp_mask), t1);
2008 2024
    } else {
2009 2025
        /* 16 bit */
2010 2026
        esp -= 2;
......
2014 2030
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2015 2031
        }
2016 2032
        esp -= 2;
2017
        stw(ssp + (esp & esp_mask), T1);
2033
        stw(ssp + (esp & esp_mask), t1);
2018 2034
    }
2019 2035
}
2020 2036

  
2021 2037
#ifdef TARGET_X86_64
2022
void helper_enter64_level(int level, int data64)
2038
void helper_enter64_level(int level, int data64, target_ulong t1)
2023 2039
{
2024 2040
    target_ulong esp, ebp;
2025 2041
    ebp = EBP;
......
2034 2050
            stq(esp, ldq(ebp));
2035 2051
        }
2036 2052
        esp -= 8;
2037
        stq(esp, T1);
2053
        stq(esp, t1);
2038 2054
    } else {
2039 2055
        /* 16 bit */
2040 2056
        esp -= 2;
......
2044 2060
            stw(esp, lduw(ebp));
2045 2061
        }
2046 2062
        esp -= 2;
2047
        stw(esp, T1);
2063
        stw(esp, t1);
2048 2064
    }
2049 2065
}
2050 2066
#endif
......
2231 2247
}
2232 2248

  
2233 2249
/* protected mode jump */
2234
void helper_ljmp_protected_T0_T1(int next_eip_addend)
2250
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2251
                           int next_eip_addend)
2235 2252
{
2236
    int new_cs, gate_cs, type;
2253
    int gate_cs, type;
2237 2254
    uint32_t e1, e2, cpl, dpl, rpl, limit;
2238
    target_ulong new_eip, next_eip;
2255
    target_ulong next_eip;
2239 2256

  
2240
    new_cs = T0;
2241
    new_eip = T1;
2242 2257
    if ((new_cs & 0xfffc) == 0)
2243 2258
        raise_exception_err(EXCP0D_GPF, 0);
2244 2259
    if (load_segment(&e1, &e2, new_cs) != 0)
......
2322 2337
}
2323 2338

  
2324 2339
/* real mode call */
2325
void helper_lcall_real_T0_T1(int shift, int next_eip)
2340
void helper_lcall_real(int new_cs, target_ulong new_eip1,
2341
                       int shift, int next_eip)
2326 2342
{
2327
    int new_cs, new_eip;
2343
    int new_eip;
2328 2344
    uint32_t esp, esp_mask;
2329 2345
    target_ulong ssp;
2330 2346

  
2331
    new_cs = T0;
2332
    new_eip = T1;
2347
    new_eip = new_eip1;
2333 2348
    esp = ESP;
2334 2349
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
2335 2350
    ssp = env->segs[R_SS].base;
......
2348 2363
}
2349 2364

  
2350 2365
/* protected mode call */
2351
void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
2366
void helper_lcall_protected(int new_cs, target_ulong new_eip, 
2367
                            int shift, int next_eip_addend)
2352 2368
{
2353
    int new_cs, new_stack, i;
2369
    int new_stack, i;
2354 2370
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2355 2371
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2356 2372
    uint32_t val, limit, old_sp_mask;
2357
    target_ulong ssp, old_ssp, next_eip, new_eip;
2373
    target_ulong ssp, old_ssp, next_eip;
2358 2374

  
2359
    new_cs = T0;
2360
    new_eip = T1;
2361 2375
    next_eip = env->eip + next_eip_addend;
2362 2376
#ifdef DEBUG_PCALL
2363 2377
    if (loglevel & CPU_LOG_PCALL) {
......
2922 2936
#endif
2923 2937
}
2924 2938

  
2925
void helper_movl_crN_T0(int reg)
2939
void helper_movl_crN_T0(int reg, target_ulong t0)
2926 2940
{
2927 2941
#if !defined(CONFIG_USER_ONLY)
2928 2942
    switch(reg) {
2929 2943
    case 0:
2930
        cpu_x86_update_cr0(env, T0);
2944
        cpu_x86_update_cr0(env, t0);
2931 2945
        break;
2932 2946
    case 3:
2933
        cpu_x86_update_cr3(env, T0);
2947
        cpu_x86_update_cr3(env, t0);
2934 2948
        break;
2935 2949
    case 4:
2936
        cpu_x86_update_cr4(env, T0);
2950
        cpu_x86_update_cr4(env, t0);
2937 2951
        break;
2938 2952
    case 8:
2939
        cpu_set_apic_tpr(env, T0);
2940
        env->cr[8] = T0;
2953
        cpu_set_apic_tpr(env, t0);
2954
        env->cr[8] = t0;
2941 2955
        break;
2942 2956
    default:
2943
        env->cr[reg] = T0;
2957
        env->cr[reg] = t0;
2944 2958
        break;
2945 2959
    }
2946 2960
#endif
2947 2961
}
2948 2962

  
2963
void helper_lmsw(target_ulong t0)
2964
{
2965
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2966
       if already set to one. */
2967
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
2968
    helper_movl_crN_T0(0, t0);
2969
}
2970

  
2971
void helper_clts(void)
2972
{
2973
    env->cr[0] &= ~CR0_TS_MASK;
2974
    env->hflags &= ~HF_TS_MASK;
2975
}
2976

  
2977
#if !defined(CONFIG_USER_ONLY)
2978
target_ulong helper_movtl_T0_cr8(void)
2979
{
2980
    return cpu_get_apic_tpr(env);
2981
}
2982
#endif
2983

  
2949 2984
/* XXX: do more */
2950
void helper_movl_drN_T0(int reg)
2985
void helper_movl_drN_T0(int reg, target_ulong t0)
2951 2986
{
2952
    env->dr[reg] = T0;
2987
    env->dr[reg] = t0;
2953 2988
}
2954 2989

  
2955 2990
void helper_invlpg(target_ulong addr)
......
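
The new helper_lmsw above keeps the comment's promise that only the four low bits of CR0 are writable and that PE cannot be cleared once set: (env->cr[0] & ~0xe) preserves bit 0 (PE) and everything above bit 3 while clearing MP/EM/TS, and | (t0 & 0xf) can then only set bits, never clear PE. A small check of that expression in isolation:

    #include <stdio.h>
    #include <stdint.h>

    /* Bit masking used by LMSW: keep everything except MP/EM/TS (bits 1-3),
     * then OR in the low nibble of the operand.  Because PE (bit 0) of the
     * old CR0 is preserved and only OR'ed with the new value, LMSW can set
     * PE but never clear it. */
    static uint32_t lmsw(uint32_t cr0, uint32_t operand)
    {
        return (cr0 & ~0xeu) | (operand & 0xfu);
    }

    int main(void)
    {
        printf("%#x\n", lmsw(0x80000011, 0x0)); /* PE stays set:     0x80000011 */
        printf("%#x\n", lmsw(0x80000010, 0x1)); /* PE can be set:    0x80000011 */
        printf("%#x\n", lmsw(0x8000001f, 0x1)); /* MP/EM/TS cleared: 0x80000011 */
        return 0;
    }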
2975 3010
        raise_exception(EXCP0D_GPF);
2976 3011
    }
2977 3012

  
2978
    if (!svm_check_intercept_param(SVM_EXIT_RDPMC, 0)) {
2979
        /* currently unimplemented */
2980
        raise_exception_err(EXCP06_ILLOP, 0);
2981
    }
3013
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3014
    
3015
    /* currently unimplemented */
3016
    raise_exception_err(EXCP06_ILLOP, 0);
2982 3017
}
2983 3018

  
2984 3019
#if defined(CONFIG_USER_ONLY)
......
3118 3153
}
3119 3154
#endif
3120 3155

  
3121
void helper_lsl(uint32_t selector)
3156
uint32_t helper_lsl(uint32_t selector)
3122 3157
{
3123 3158
    unsigned int limit;
3124 3159
    uint32_t e1, e2, eflags;
......
3153 3188
        if (dpl < cpl || dpl < rpl) {
3154 3189
        fail:
3155 3190
            CC_SRC = eflags & ~CC_Z;
3156
            return;
3191
            return 0;
3157 3192
        }
3158 3193
    }
3159 3194
    limit = get_seg_limit(e1, e2);
3160
    T1 = limit;
3161 3195
    CC_SRC = eflags | CC_Z;
3196
    return limit;
3162 3197
}
3163 3198

  
3164
void helper_lar(uint32_t selector)
3199
uint32_t helper_lar(uint32_t selector)
3165 3200
{
3166 3201
    uint32_t e1, e2, eflags;
3167 3202
    int rpl, dpl, cpl, type;
......
3200 3235
        if (dpl < cpl || dpl < rpl) {
3201 3236
        fail:
3202 3237
            CC_SRC = eflags & ~CC_Z;
3203
            return;
3238
            return 0;
3204 3239
        }
3205 3240
    }
3206
    T1 = e2 & 0x00f0ff00;
3207 3241
    CC_SRC = eflags | CC_Z;
3242
    return e2 & 0x00f0ff00;
3208 3243
}
3209 3244

  
3210 3245
void helper_verr(uint32_t selector)
......
4412 4447
    return 0;
4413 4448
}
4414 4449

  
4415
void helper_mulq_EAX_T0(void)
4450
void helper_mulq_EAX_T0(target_ulong t0)
4416 4451
{
4417 4452
    uint64_t r0, r1;
4418 4453

  
4419
    mulu64(&r0, &r1, EAX, T0);
4454
    mulu64(&r0, &r1, EAX, t0);
4420 4455
    EAX = r0;
4421 4456
    EDX = r1;
4422 4457
    CC_DST = r0;
4423 4458
    CC_SRC = r1;
4424 4459
}
4425 4460

  
4426
void helper_imulq_EAX_T0(void)
4461
void helper_imulq_EAX_T0(target_ulong t0)
4427 4462
{
4428 4463
    uint64_t r0, r1;
4429 4464

  
4430
    muls64(&r0, &r1, EAX, T0);
4465
    muls64(&r0, &r1, EAX, t0);
4431 4466
    EAX = r0;
4432 4467
    EDX = r1;
4433 4468
    CC_DST = r0;
4434 4469
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4435 4470
}
4436 4471

  
4437
void helper_imulq_T0_T1(void)
4472
target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4438 4473
{
4439 4474
    uint64_t r0, r1;
4440 4475

  
4441
    muls64(&r0, &r1, T0, T1);
4442
    T0 = r0;
4476
    muls64(&r0, &r1, t0, t1);
4443 4477
    CC_DST = r0;
4444 4478
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4479
    return r0;
4445 4480
}
4446 4481

  
4447 4482
void helper_divq_EAX(target_ulong t0)
......
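
helper_mulq_EAX_T0, helper_imulq_EAX_T0 and helper_imulq_T0_T1 above now take their multiplier operands as arguments and use mulu64()/muls64() from host-utils to obtain the 128-bit product as a low/high pair; for the signed variants the flag source records whether the high half differs from the sign-extension of the low half, i.e. whether the product overflows 64 bits. A sketch of the same computation; using the compiler's __int128 extension here is an assumption made only to keep the example short, not how host-utils implements it:

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    /* 64x64 -> 128 multiplies, split into a low/high pair the way
     * mulu64()/muls64() return them. */
    static void mulu64_sketch(uint64_t *lo, uint64_t *hi, uint64_t a, uint64_t b)
    {
        unsigned __int128 p = (unsigned __int128)a * b;
        *lo = (uint64_t)p;
        *hi = (uint64_t)(p >> 64);
    }

    static void muls64_sketch(uint64_t *lo, uint64_t *hi, int64_t a, int64_t b)
    {
        __int128 p = (__int128)a * b;
        *lo = (uint64_t)p;
        *hi = (uint64_t)(p >> 64);
    }

    int main(void)
    {
        uint64_t lo, hi;

        mulu64_sketch(&lo, &hi, 0xffffffffffffffffULL, 2);
        printf("unsigned: hi=%" PRIx64 " lo=%" PRIx64 "\n", hi, lo);

        muls64_sketch(&lo, &hi, -3, 4);
        /* Overflow test used for CC_SRC in helper_imulq_*: the result does not
           fit in 64 bits iff the high half differs from the low half's sign. */
        int overflow = ((int64_t)hi != ((int64_t)lo >> 63));
        printf("signed:   hi=%" PRIx64 " lo=%" PRIx64 " overflow=%d\n", hi, lo, overflow);
        return 0;
    }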
4553 4588
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4554 4589
}
4555 4590

  
4556
void helper_boundw(void)
4591
void helper_boundw(target_ulong a0, int v)
4557 4592
{
4558
    int low, high, v;
4559
    low = ldsw(A0);
4560
    high = ldsw(A0 + 2);
4561
    v = (int16_t)T0;
4593
    int low, high;
4594
    low = ldsw(a0);
4595
    high = ldsw(a0 + 2);
4596
    v = (int16_t)v;
4562 4597
    if (v < low || v > high) {
4563 4598
        raise_exception(EXCP05_BOUND);
4564 4599
    }
4565 4600
    FORCE_RET();
4566 4601
}
4567 4602

  
4568
void helper_boundl(void)
4603
void helper_boundl(target_ulong a0, int v)
4569 4604
{
4570
    int low, high, v;
4571
    low = ldl(A0);
4572
    high = ldl(A0 + 4);
4573
    v = T0;
4605
    int low, high;
4606
    low = ldl(a0);
4607
    high = ldl(a0 + 4);
4574 4608
    if (v < low || v > high) {
4575 4609
        raise_exception(EXCP05_BOUND);
4576 4610
    }
......
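
helper_boundw and helper_boundl above now take the descriptor address a0 and the index v as arguments: they load a signed (low, high) pair from memory and raise #BR (EXCP05_BOUND) when the index lies outside the inclusive range (boundw additionally sign-extends v to 16 bits first). The check itself, boundl-style, in isolation:

    #include <stdio.h>
    #include <stdint.h>

    /* BOUND check as in helper_boundw/helper_boundl: the operand points at a
     * signed (low, high) pair and the instruction faults when the index is
     * outside the inclusive range. */
    static int bound_faults(int32_t low, int32_t high, int32_t v)
    {
        return v < low || v > high;      /* non-zero means "raise #BR" */
    }

    int main(void)
    {
        printf("%d\n", bound_faults(0, 9, 5));    /* 0: in range    */
        printf("%d\n", bound_faults(0, 9, 10));   /* 1: would fault */
        printf("%d\n", bound_faults(-4, 4, -5));  /* 1: would fault */
        return 0;
    }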
4661 4695

  
4662 4696
#if defined(CONFIG_USER_ONLY)
4663 4697

  
4664
void helper_vmrun(void) { }
4665
void helper_vmmcall(void) { }
4666
void helper_vmload(void) { }
4667
void helper_vmsave(void) { }
4668
void helper_skinit(void) { }
4669
void helper_invlpga(void) { }
4670
void vmexit(uint64_t exit_code, uint64_t exit_info_1) { }
4671
int svm_check_intercept_param(uint32_t type, uint64_t param)
4698
void helper_vmrun(void) 
4699
{ 
4700
}
4701
void helper_vmmcall(void) 
4702
{ 
4703
}
4704
void helper_vmload(void) 
4705
{ 
4706
}
4707
void helper_vmsave(void) 
4708
{ 
4709
}
4710
void helper_skinit(void) 
4711
{ 
4712
}
4713
void helper_invlpga(void) 
4714
{ 
4715
}
4716
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1) 
4717
{ 
4718
}
4719
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
4672 4720
{
4673
    return 0;
4674 4721
}
4675 4722

  
4723
void helper_svm_check_io(uint32_t port, uint32_t param, 
4724
                         uint32_t next_eip_addend)
4725
{
4726
}
4676 4727
#else
4677 4728

  
4678 4729
static inline uint32_t
......
4702 4753
        fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
4703 4754

  
4704 4755
    env->vm_vmcb = addr;
4705
    regs_to_env();
4706 4756

  
4707 4757
    /* save the current CPU state in the hsave page */
4708 4758
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
......
4801 4851

  
4802 4852
    helper_stgi();
4803 4853

  
4804
    regs_to_env();
4805

  
4806 4854
    /* maybe we need to inject an event */
4807 4855
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
4808 4856
    if (event_inj & SVM_EVTINJ_VALID) {
......
4927 4975
    tlb_flush(env, 0);
4928 4976
}
4929 4977

  
4930
int svm_check_intercept_param(uint32_t type, uint64_t param)
4978
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
4931 4979
{
4932 4980
    switch(type) {
4933 4981
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
4934 4982
        if (INTERCEPTEDw(_cr_read, (1 << (type - SVM_EXIT_READ_CR0)))) {
4935
            vmexit(type, param);
4936
            return 1;
4983
            helper_vmexit(type, param);
4937 4984
        }
4938 4985
        break;
4939 4986
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 8:
4940 4987
        if (INTERCEPTEDw(_dr_read, (1 << (type - SVM_EXIT_READ_DR0)))) {
4941
            vmexit(type, param);
4942
            return 1;
4988
            helper_vmexit(type, param);
4943 4989
        }
4944 4990
        break;
4945 4991
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
4946 4992
        if (INTERCEPTEDw(_cr_write, (1 << (type - SVM_EXIT_WRITE_CR0)))) {
4947
            vmexit(type, param);
4948
            return 1;
4993
            helper_vmexit(type, param);
4949 4994
        }
4950 4995
        break;
4951 4996
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 8:
4952 4997
        if (INTERCEPTEDw(_dr_write, (1 << (type - SVM_EXIT_WRITE_DR0)))) {
4953
            vmexit(type, param);
4954
            return 1;
4998
            helper_vmexit(type, param);
4955 4999
        }
4956 5000
        break;
4957 5001
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 16:
4958 5002
        if (INTERCEPTEDl(_exceptions, (1 << (type - SVM_EXIT_EXCP_BASE)))) {
4959
            vmexit(type, param);
4960
            return 1;
5003
            helper_vmexit(type, param);
4961 5004
        }
4962 5005
        break;
4963 5006
    case SVM_EXIT_IOIO:
4964
        if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT)) {
4965
            /* FIXME: this should be read in at vmrun (faster this way?) */
4966
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
4967
            uint16_t port = (uint16_t) (param >> 16);
4968

  
4969
            uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
4970
            if(lduw_phys(addr + port / 8) & (mask << (port & 7)))
4971
                vmexit(type, param);
4972
        }
4973 5007
        break;
4974 5008

  
4975 5009
    case SVM_EXIT_MSR:
4976 5010
        if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT)) {
4977 5011
            /* FIXME: this should be read in at vmrun (faster this way?) */
4978 5012
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
5013
            uint32_t t0, t1;
4979 5014
            switch((uint32_t)ECX) {
4980 5015
            case 0 ... 0x1fff:
4981
                T0 = (ECX * 2) % 8;
4982
                T1 = ECX / 8;
5016
                t0 = (ECX * 2) % 8;
5017
                t1 = ECX / 8;
4983 5018
                break;
4984 5019
            case 0xc0000000 ... 0xc0001fff:
4985
                T0 = (8192 + ECX - 0xc0000000) * 2;
4986
                T1 = (T0 / 8);
4987
                T0 %= 8;
5020
                t0 = (8192 + ECX - 0xc0000000) * 2;
5021
                t1 = (t0 / 8);
5022
                t0 %= 8;
4988 5023
                break;
4989 5024
            case 0xc0010000 ... 0xc0011fff:
4990
                T0 = (16384 + ECX - 0xc0010000) * 2;
4991
                T1 = (T0 / 8);
4992
                T0 %= 8;
5025
                t0 = (16384 + ECX - 0xc0010000) * 2;
5026
                t1 = (t0 / 8);
5027
                t0 %= 8;
4993 5028
                break;
4994 5029
            default:
4995
                vmexit(type, param);
4996
                return 1;
5030
                helper_vmexit(type, param);
5031
                t0 = 0;
5032
                t1 = 0;
5033
                break;
4997 5034
            }
4998
            if (ldub_phys(addr + T1) & ((1 << param) << T0))
4999
                vmexit(type, param);
5000
            return 1;
5035
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
5036
                helper_vmexit(type, param);
5001 5037
        }
5002 5038
        break;
5003 5039
    default:
5004 5040
        if (INTERCEPTED((1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR)))) {
5005
            vmexit(type, param);
5006
            return 1;
5041
            helper_vmexit(type, param);
5007 5042
        }
5008 5043
        break;
5009 5044
    }
5010
    return 0;
5011 5045
}
5012 5046

  
5013
void vmexit(uint64_t exit_code, uint64_t exit_info_1)
5047
void helper_svm_check_io(uint32_t port, uint32_t param, 
5048
                         uint32_t next_eip_addend)
5049
{
5050
    if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT)) {
5051
        /* FIXME: this should be read in at vmrun (faster this way?) */
5052
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
5053
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
5054
        if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
5055
            /* next EIP */
5056
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 
5057
                     env->eip + next_eip_addend);
5058
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
5059
        }
5060
    }
5061
}
5062

  
5063
/* Note: currently only 32 bits of exit_code are used */
5064
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
5014 5065
{
5015 5066
    uint32_t int_ctl;
5016 5067

  
5017 5068
    if (loglevel & CPU_LOG_TB_IN_ASM)
5018
        fprintf(logfile,"vmexit(%016" PRIx64 ", %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
5069
        fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
5019 5070
                exit_code, exit_info_1,
5020 5071
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
5021 5072
                EIP);
......
5105 5156

  
5106 5157
    /* other setups */
5107 5158
    cpu_x86_set_cpl(env, 0);
5108
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code_hi), (uint32_t)(exit_code >> 32));
5109
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
5159
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
5110 5160
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
5111 5161

  
5112 5162
    helper_clgi();
......
5137 5187
    env->error_code = 0;
5138 5188
    env->old_exception = -1;
5139 5189

  
5140
    regs_to_env();
5141 5190
    cpu_loop_exit();
5142 5191
}
5143 5192

  
b/target-i386/helper.h
1 1
#define TCG_HELPER_PROTO
2 2

  
3
void helper_lock(void);
4
void helper_unlock(void);
3 5
void helper_divb_AL(target_ulong t0);
4 6
void helper_idivb_AL(target_ulong t0);
5 7
void helper_divw_AX(target_ulong t0);
......
7 9
void helper_divl_EAX(target_ulong t0);
8 10
void helper_idivl_EAX(target_ulong t0);
9 11
#ifdef TARGET_X86_64
12
void helper_mulq_EAX_T0(target_ulong t0);
13
void helper_imulq_EAX_T0(target_ulong t0);
14
target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1);
10 15
void helper_divq_EAX(target_ulong t0);
11 16
void helper_idivq_EAX(target_ulong t0);
12 17
#endif
......
18 23
void helper_daa(void);
19 24
void helper_das(void);
20 25

  
21
void helper_lsl(uint32_t selector);
22
void helper_lar(uint32_t selector);
26
uint32_t helper_lsl(uint32_t selector);
27
uint32_t helper_lar(uint32_t selector);
23 28
void helper_verr(uint32_t selector);
24 29
void helper_verw(uint32_t selector);
25 30
void helper_lldt(int selector);
26 31
void helper_ltr(int selector);
27 32
void helper_load_seg(int seg_reg, int selector);
28
void helper_ljmp_protected_T0_T1(int next_eip);
29
void helper_lcall_real_T0_T1(int shift, int next_eip);
30
void helper_lcall_protected_T0_T1(int shift, int next_eip);
33
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
34
                           int next_eip_addend);
35
void helper_lcall_real(int new_cs, target_ulong new_eip1,
36
                       int shift, int next_eip);
37
void helper_lcall_protected(int new_cs, target_ulong new_eip, 
38
                            int shift, int next_eip_addend);
31 39
void helper_iret_real(int shift);
32 40
void helper_iret_protected(int shift, int next_eip);
33 41
void helper_lret_protected(int shift, int addend);
34
void helper_movl_crN_T0(int reg);
35
void helper_movl_drN_T0(int reg);
42
void helper_movl_crN_T0(int reg, target_ulong t0);
43
void helper_lmsw(target_ulong t0);
44
void helper_clts(void);
45
#if !defined(CONFIG_USER_ONLY)
46
target_ulong helper_movtl_T0_cr8(void);
47
#endif
48
void helper_movl_drN_T0(int reg, target_ulong t0);
36 49
void helper_invlpg(target_ulong addr);
37 50

  
38
void helper_enter_level(int level, int data32);
51
void helper_enter_level(int level, int data32, target_ulong t1);
39 52
#ifdef TARGET_X86_64
40
void helper_enter64_level(int level, int data64);
53
void helper_enter64_level(int level, int data64, target_ulong t1);
41 54
#endif
42 55
void helper_sysenter(void);
43 56
void helper_sysexit(void);
......
55 68
void helper_sti(void);
56 69
void helper_set_inhibit_irq(void);
57 70
void helper_reset_inhibit_irq(void);
58
void helper_boundw(void);
59
void helper_boundl(void);
71
void helper_boundw(target_ulong a0, int v);
72
void helper_boundl(target_ulong a0, int v);
60 73
void helper_rsm(void);
74
void helper_cmpxchg8b(target_ulong a0);
61 75
void helper_single_step(void);
62 76
void helper_cpuid(void);
63 77
void helper_rdtsc(void);
......
65 79
void helper_rdmsr(void);
66 80
void helper_wrmsr(void);
67 81

  
82
void helper_check_iob(uint32_t t0);
83
void helper_check_iow(uint32_t t0);
84
void helper_check_iol(uint32_t t0);
85
void helper_outb(uint32_t port, uint32_t data);
86
target_ulong helper_inb(uint32_t port);
87
void helper_outw(uint32_t port, uint32_t data);
88
target_ulong helper_inw(uint32_t port);
89
void helper_outl(uint32_t port, uint32_t data);
90
target_ulong helper_inl(uint32_t port);
91

  
92
void helper_svm_check_intercept_param(uint32_t type, uint64_t param);
93
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1);
94
void helper_svm_check_io(uint32_t port, uint32_t param, 
95
                         uint32_t next_eip_addend);
68 96
void helper_vmrun(void);
69 97
void helper_vmmcall(void);
70 98
void helper_vmload(void);
b/target-i386/op.c
276 276
#ifdef TARGET_X86_64
277 277
void OPPROTO op_mulq_EAX_T0(void)
278 278
{
279
    helper_mulq_EAX_T0();
279
    helper_mulq_EAX_T0(T0);
280 280
}
281 281

  
282 282
void OPPROTO op_imulq_EAX_T0(void)
283 283
{
284
    helper_imulq_EAX_T0();
284
    helper_imulq_EAX_T0(T0);
285 285
}
286 286

  
287 287
void OPPROTO op_imulq_T0_T1(void)
288 288
{
289
    helper_imulq_T0_T1();
289
    T0 = helper_imulq_T0_T1(T0, T1);
290 290
}
291 291
#endif
292 292

  
......
351 351

  
352 352
void OPPROTO op_cmpxchg8b(void)
353 353
{
354
    helper_cmpxchg8b();
354
    helper_cmpxchg8b(A0);
355 355
}
356 356

  
357 357
/* multiple size ops */
......
522 522

  
523 523
/* segment handling */
524 524

  
525
/* never use it with R_CS */
526
void OPPROTO op_movl_seg_T0(void)
527
{
528
    helper_load_seg(PARAM1, T0);
529
}
530

  
531 525
/* faster VM86 version */
532 526
void OPPROTO op_movl_seg_T0_vm(void)
533 527
{
......
548 542

  
549 543
void OPPROTO op_lsl(void)
550 544
{
551
    helper_lsl(T0);
545
    uint32_t val;
546
    val = helper_lsl(T0);
547
    if (CC_SRC & CC_Z)
548
        T1 = val;
549
    FORCE_RET();
552 550
}
553 551

  
554 552
void OPPROTO op_lar(void)
555 553
{
556
    helper_lar(T0);
554
    uint32_t val;
555
    val = helper_lar(T0);
556
    if (CC_SRC & CC_Z)
557
        T1 = val;
558
    FORCE_RET();
557 559
}
558 560

  
559 561
void OPPROTO op_verr(void)
......
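
helper_lsl and helper_lar now return the limit or access-rights value instead of writing T1, and the op_lsl/op_lar wrappers above commit the result only when the helper reported success through CC_Z, which matches the architectural rule that LSL/LAR leave the destination unchanged and clear ZF for an unusable selector. A toy version of that return-value-plus-flag pattern, with hypothetical names:

    #include <stdio.h>
    #include <stdint.h>

    #define FLAG_Z 0x40          /* stand-in for CC_Z */

    static uint32_t flags;       /* stand-in for CC_SRC */

    /* Hypothetical helper in the new style: it returns the value and reports
     * validity through the Z flag instead of writing a global destination. */
    static uint32_t helper_limit(uint32_t selector)
    {
        if (selector == 0) {          /* "invalid selector" */
            flags &= ~FLAG_Z;
            return 0;
        }
        flags |= FLAG_Z;
        return 0xfffff;               /* some limit */
    }

    int main(void)
    {
        uint32_t dest = 0x1234;       /* stand-in for T1 / the destination */

        uint32_t val = helper_limit(0);
        if (flags & FLAG_Z)           /* only write back on success */
            dest = val;
        printf("invalid: dest=%#x zf=%d\n", dest, !!(flags & FLAG_Z));

        val = helper_limit(0x10);
        if (flags & FLAG_Z)
            dest = val;
        printf("valid:   dest=%#x zf=%d\n", dest, !!(flags & FLAG_Z));
        return 0;
    }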
585 587
    CC_SRC = (eflags & ~CC_Z) | T1;
586 588
}
587 589

  
588
/* T0: segment, T1:eip */
589
void OPPROTO op_ljmp_protected_T0_T1(void)
590
{
591
    helper_ljmp_protected_T0_T1(PARAM1);
592
}
593

  
594
void OPPROTO op_lcall_real_T0_T1(void)
595
{
596
    helper_lcall_real_T0_T1(PARAM1, PARAM2);
597
}
598

  
599
void OPPROTO op_lcall_protected_T0_T1(void)
600
{
601
    helper_lcall_protected_T0_T1(PARAM1, PARAM2);
602
}
603

  
604
void OPPROTO op_iret_real(void)
605
{
606
    helper_iret_real(PARAM1);
607
}
608

  
609
void OPPROTO op_iret_protected(void)
610
{
611
    helper_iret_protected(PARAM1, PARAM2);
612
}
613

  
614
void OPPROTO op_lret_protected(void)
615
{
616
    helper_lret_protected(PARAM1, PARAM2);
617
}
618

  
619
/* CR registers access. */
620
void OPPROTO op_movl_crN_T0(void)
621
{
622
    helper_movl_crN_T0(PARAM1);
623
}
624

  
625
/* These pseudo-opcodes check for SVM intercepts. */
626
void OPPROTO op_svm_check_intercept(void)
627
{
628
    A0 = PARAM1 & PARAM2;
629
    svm_check_intercept(PARAMQ1);
630
}
631

  
632
void OPPROTO op_svm_check_intercept_param(void)
633
{
634
    A0 = PARAM1 & PARAM2;
635
    svm_check_intercept_param(PARAMQ1, T1);
636
}
637

  
638
void OPPROTO op_svm_vmexit(void)
639
{
640
    A0 = PARAM1 & PARAM2;
641
    vmexit(PARAMQ1, T1);
642
}
643

  
644
void OPPROTO op_geneflags(void)
645
{
646
    CC_SRC = cc_table[CC_OP].compute_all();
647
}
648

  
649
/* This pseudo-opcode checks for IO intercepts. */
650
#if !defined(CONFIG_USER_ONLY)
651
void OPPROTO op_svm_check_intercept_io(void)
652
{
653
    A0 = PARAM1 & PARAM2;
654
    /* PARAMQ1 = TYPE (0 = OUT, 1 = IN; 4 = STRING; 8 = REP)
655
       T0      = PORT
656
       T1      = next eip */
657
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), T1);
658
    /* ASIZE does not appear on real hw */
659
    svm_check_intercept_param(SVM_EXIT_IOIO,
660
                              (PARAMQ1 & ~SVM_IOIO_ASIZE_MASK) |
661
                              ((T0 & 0xffff) << 16));
662
}
663
#endif
664

  
665
#if !defined(CONFIG_USER_ONLY)
666
void OPPROTO op_movtl_T0_cr8(void)
667
{
668
    T0 = cpu_get_apic_tpr(env);
669
}
670
#endif
671

  
672
/* DR registers access */
673
void OPPROTO op_movl_drN_T0(void)
674
{
675
    helper_movl_drN_T0(PARAM1);
676
}
677

  
678
void OPPROTO op_lmsw_T0(void)
679
{
680
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
681
       if already set to one. */
682
    T0 = (env->cr[0] & ~0xe) | (T0 & 0xf);
683
    helper_movl_crN_T0(0);
684
}
685

  
686 590
void OPPROTO op_movl_T0_env(void)
687 591
{
688 592
    T0 = *(uint32_t *)((char *)env + PARAM1);
......
718 622
    *(target_ulong *)((char *)env + PARAM1) = T1;
719 623
}
720 624

  
721
void OPPROTO op_clts(void)
722
{
723
    env->cr[0] &= ~CR0_TS_MASK;
724
    env->hflags &= ~HF_TS_MASK;
725
}
726

  
727 625
/* flags handling */
728 626

  
729 627
void OPPROTO op_jmp_label(void)
......
1028 926
    T0 = 0;
1029 927
}
1030 928

  
1031
/* threading support */
1032
void OPPROTO op_lock(void)
1033
{
1034
    cpu_lock();
1035
}
1036

  
1037
void OPPROTO op_unlock(void)
1038
{
1039
    cpu_unlock();
1040
}
1041

  
1042 929
/* SSE support */
1043 930
void OPPROTO op_com_dummy(void)
1044 931
{
b/target-i386/ops_sse.h
471 471
#endif
472 472
}
473 473

  
474
void glue(helper_maskmov, SUFFIX) (Reg *d, Reg *s)
474
void glue(helper_maskmov, SUFFIX) (Reg *d, Reg *s, target_ulong a0)
475 475
{
476 476
    int i;
477 477
    for(i = 0; i < (8 << SHIFT); i++) {
478 478
        if (s->B(i) & 0x80)
479
            stb(A0 + i, d->B(i));
479
            stb(a0 + i, d->B(i));
480 480
    }
481 481
    FORCE_RET();
482 482
}
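
The maskmov helper above now receives the destination address a0 explicitly; its loop is the MASKMOVQ/MASKMOVDQU store: each of the 8 (MMX) or 16 (SSE) bytes of the first operand is written to memory only when bit 7 of the corresponding mask byte is set. A plain-C model of that selective store:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    /* Byte-masked store as in glue(helper_maskmov, SUFFIX): write data[i] to
     * mem[i] only when bit 7 of mask[i] is set; other bytes are untouched. */
    static void maskmov(uint8_t *mem, const uint8_t *data,
                        const uint8_t *mask, int nbytes)
    {
        for (int i = 0; i < nbytes; i++) {
            if (mask[i] & 0x80)
                mem[i] = data[i];
        }
    }

    int main(void)
    {
        uint8_t mem[8];
        memset(mem, 0xee, sizeof(mem));

        const uint8_t data[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        const uint8_t mask[8] = { 0x80, 0, 0x80, 0, 0xff, 0, 0, 0x80 };

        maskmov(mem, data, mask, 8);          /* bytes 0, 2, 4 and 7 are written */
        for (int i = 0; i < 8; i++)
            printf("%02x ", mem[i]);
        printf("\n");
        return 0;
    }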
b/target-i386/ops_sse_header.h
104 104
void glue(helper_pmaddwd, SUFFIX) (Reg *d, Reg *s);
105 105

  
106 106
void glue(helper_psadbw, SUFFIX) (Reg *d, Reg *s);
107
void glue(helper_maskmov, SUFFIX) (Reg *d, Reg *s);
107
void glue(helper_maskmov, SUFFIX) (Reg *d, Reg *s, target_ulong a0);
108 108
void glue(helper_movl_mm_T0, SUFFIX) (Reg *d, uint32_t val);
109 109
#ifdef TARGET_X86_64
110 110
void glue(helper_movq_mm_T0, SUFFIX) (Reg *d, uint64_t val);
b/target-i386/ops_template.h
554 554
    T0 = DF << SHIFT;
555 555
}
556 556

  
557
/* port I/O */
558
#if DATA_BITS <= 32
559
void OPPROTO glue(glue(op_out, SUFFIX), _T0_T1)(void)
560
{
561
    glue(cpu_out, SUFFIX)(env, T0, T1 & DATA_MASK);
562
}
563

  
564
void OPPROTO glue(glue(op_in, SUFFIX), _T0_T1)(void)
565
{
566
    T1 = glue(cpu_in, SUFFIX)(env, T0);
567
}
568

  
569
void OPPROTO glue(glue(op_in, SUFFIX), _DX_T0)(void)
570
{
571
    T0 = glue(cpu_in, SUFFIX)(env, EDX & 0xffff);
572
}
573

  
574
void OPPROTO glue(glue(op_out, SUFFIX), _DX_T0)(void)
575
{
576
    glue(cpu_out, SUFFIX)(env, EDX & 0xffff, T0);
577
}
578

  
579
void OPPROTO glue(glue(op_check_io, SUFFIX), _T0)(void)
580
{
581
    glue(glue(check_io, SUFFIX), _T0)();
582
}
583

  
584
void OPPROTO glue(glue(op_check_io, SUFFIX), _DX)(void)
585
{
586
    glue(glue(check_io, SUFFIX), _DX)();
587
}
588
#endif
589

  
590 557
#undef DATA_BITS
591 558
#undef SHIFT_MASK
592 559
#undef SHIFT1_MASK
b/target-i386/svm.h
71 71
	uint32_t int_vector;
72 72
	uint32_t int_state;
73 73
	uint8_t reserved_3[4];
74
	uint32_t exit_code;
75
	uint32_t exit_code_hi;
74
	uint64_t exit_code;
76 75
	uint64_t exit_info_1;
77 76
	uint64_t exit_info_2;
78 77
	uint32_t exit_int_info;
......
323 322

  
324 323
/* function references */
325 324

  
326
void helper_stgi(void);
327
void vmexit(uint64_t exit_code, uint64_t exit_info_1);
328
int svm_check_intercept_param(uint32_t type, uint64_t param);
329
static inline int svm_check_intercept(unsigned int type) {
330
    return svm_check_intercept_param(type, 0);
331
}
332

  
333

  
334 325
#define INTERCEPTED(mask) (env->intercept & mask)
335 326
#define INTERCEPTEDw(var, mask) (env->intercept ## var & mask)
336 327
#define INTERCEPTEDl(var, mask) (env->intercept ## var & mask)
b/target-i386/translate.c
60 60
/* global register indexes */
61 61
static TCGv cpu_env, cpu_T[2], cpu_A0;
62 62
/* local register indexes (only used inside old micro ops) */
63
static TCGv cpu_tmp0, cpu_tmp1, cpu_tmp2, cpu_ptr0, cpu_ptr1;
63
static TCGv cpu_tmp0, cpu_tmp1, cpu_tmp2, cpu_tmp3, cpu_ptr0, cpu_ptr1;
64 64

  
65 65
#ifdef TARGET_X86_64
66 66
static int x86_64_hregs;
......
903 903
    },
904 904
};
905 905

  
906
static GenOpFunc *gen_op_in_DX_T0[3] = {
907
    gen_op_inb_DX_T0,
908
    gen_op_inw_DX_T0,
909
    gen_op_inl_DX_T0,
906
static void *helper_in_func[3] = {
907
    helper_inb,
908
    helper_inw,
909
    helper_inl,
910 910
};
911 911

  
912
static GenOpFunc *gen_op_out_DX_T0[3] = {
913
    gen_op_outb_DX_T0,
914
    gen_op_outw_DX_T0,
915
    gen_op_outl_DX_T0,
912
static void *helper_out_func[3] = {
913
    helper_outb,
914
    helper_outw,
915
    helper_outl,
916 916
};
917 917

  
918
static GenOpFunc *gen_op_in[3] = {
919
    gen_op_inb_T0_T1,
920
    gen_op_inw_T0_T1,
921
    gen_op_inl_T0_T1,
918
static void *gen_check_io_func[3] = {
919
    helper_check_iob,
920
    helper_check_iow,
921
    helper_check_iol,
922 922
};
923 923

  
924
static GenOpFunc *gen_op_out[3] = {
925
    gen_op_outb_T0_T1,
926
    gen_op_outw_T0_T1,
927
    gen_op_outl_T0_T1,
928
};
929

  
930
static GenOpFunc *gen_check_io_T0[3] = {
931
    gen_op_check_iob_T0,
932
    gen_op_check_iow_T0,
933
    gen_op_check_iol_T0,
934
};
935

  
936
static GenOpFunc *gen_check_io_DX[3] = {
937
    gen_op_check_iob_DX,
938
    gen_op_check_iow_DX,
939
    gen_op_check_iol_DX,
940
};
941

  
942
static void gen_check_io(DisasContext *s, int ot, int use_dx, target_ulong cur_eip)
924
static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
925
                         uint32_t svm_flags)
943 926
{
927
    int state_saved;
928
    target_ulong next_eip;
929

  
930
    state_saved = 0;
944 931
    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
945 932
        if (s->cc_op != CC_OP_DYNAMIC)
946 933
            gen_op_set_cc_op(s->cc_op);
947 934
        gen_jmp_im(cur_eip);
948
        if (use_dx)
949
            gen_check_io_DX[ot]();
950
        else
951
            gen_check_io_T0[ot]();
935
        state_saved = 1;
936
        tcg_gen_trunc_tl_i32(cpu_tmp2, cpu_T[0]);
937
        tcg_gen_helper_0_1(gen_check_io_func[ot],
938
                           cpu_tmp2);
939
    }
940
    if(s->flags & (1ULL << INTERCEPT_IOIO_PROT)) {
941
        if (!state_saved) {
942
            if (s->cc_op != CC_OP_DYNAMIC)
943
                gen_op_set_cc_op(s->cc_op);
944
            gen_jmp_im(cur_eip);
945
            state_saved = 1;
946
        }
947
        svm_flags |= (1 << (4 + ot));
948
        next_eip = s->pc - s->cs_base;
949
        tcg_gen_trunc_tl_i32(cpu_tmp2, cpu_T[0]);
950
        tcg_gen_helper_0_3(helper_svm_check_io,
951
                           cpu_tmp2,
952
                           tcg_const_i32(svm_flags),
953
                           tcg_const_i32(next_eip - cur_eip));
952 954
    }
953 955
}
954 956

  
......
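
The gen_op_in_DX_T0/gen_op_out_DX_T0 and gen_check_io_* micro-op tables above are replaced by helper_in_func, helper_out_func and gen_check_io_func arrays of helper pointers indexed by the operand size, which the translator hands to tcg_gen_helper_* together with truncated TCG values and tcg_const_i32 constants. The dispatch idea in miniature, with hypothetical handlers:

    #include <stdio.h>
    #include <stdint.h>

    enum { OT_BYTE, OT_WORD, OT_LONG };   /* operand sizes, as in translate.c */

    static void outb_handler(uint32_t port, uint32_t data) { printf("byte  out %#x <- %#x\n", port, data & 0xff); }
    static void outw_handler(uint32_t port, uint32_t data) { printf("word  out %#x <- %#x\n", port, data & 0xffff); }
    static void outl_handler(uint32_t port, uint32_t data) { printf("dword out %#x <- %#x\n", port, data); }

    /* One table indexed by operand size, mirroring helper_out_func[3]; the
     * handlers here are stand-ins for the real helpers. */
    static void (*const out_func[3])(uint32_t, uint32_t) = {
        outb_handler,
        outw_handler,
        outl_handler,
    };

    int main(void)
    {
        int ot = OT_WORD;                 /* decided at decode time */
        out_func[ot](0x3f8, 0x12345678);  /* what the generated call reduces to */
        return 0;
    }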
1080 1082
    gen_string_movl_A0_EDI(s);
1081 1083
    gen_op_movl_T0_0();
1082 1084
    gen_op_st_T0_A0(ot + s->mem_index);
1083
    gen_op_in_DX_T0[ot]();
1085
    gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
1086
    tcg_gen_trunc_tl_i32(cpu_tmp2, cpu_T[1]);
1087
    tcg_gen_andi_i32(cpu_tmp2, cpu_tmp2, 0xffff);
1088
    tcg_gen_helper_1_1(helper_in_func[ot], cpu_T[0], cpu_tmp2);
1084 1089
    gen_op_st_T0_A0(ot + s->mem_index);
1085 1090
    gen_op_movl_T0_Dshift[ot]();
1086 1091
#ifdef TARGET_X86_64
......
1099 1104
{
1100 1105
    gen_string_movl_A0_ESI(s);
1101 1106
    gen_op_ld_T0_A0(ot + s->mem_index);
1102
    gen_op_out_DX_T0[ot]();
1107

  
1108
    gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
1109
    tcg_gen_trunc_tl_i32(cpu_tmp2, cpu_T[1]);
1110
    tcg_gen_andi_i32(cpu_tmp2, cpu_tmp2, 0xffff);
1111
    tcg_gen_trunc_tl_i32(cpu_tmp3, cpu_T[0]);
1112
    tcg_gen_helper_0_2(helper_out_func[ot], cpu_tmp2, cpu_tmp3);
1113

  
1103 1114
    gen_op_movl_T0_Dshift[ot]();
1104 1115
#ifdef TARGET_X86_64
1105 1116
    if (s->aflag == 2) {
......
1976 1987
        if (s->cc_op != CC_OP_DYNAMIC)
1977 1988
            gen_op_set_cc_op(s->cc_op);
1978 1989
        gen_jmp_im(cur_eip);
1979
        gen_op_movl_seg_T0(seg_reg);
1990
        tcg_gen_trunc_tl_i32(cpu_tmp2, cpu_T[0]);
1991
        tcg_gen_helper_0_2(helper_load_seg, tcg_const_i32(seg_reg), cpu_tmp2);
1980 1992
        /* abort translation because the addseg value may change or
1981 1993
           because ss32 may change. For R_SS, translation must always
1982 1994
           stop as a special handling must be done to disable hardware
......
1990 2002
    }
1991 2003
}
1992 2004

  
1993
#define SVM_movq_T1_im(x) gen_movtl_T1_im(x)
1994

  
1995
static inline int
1996
gen_svm_check_io(DisasContext *s, target_ulong pc_start, uint64_t type)
1997
{
1998
#if !defined(CONFIG_USER_ONLY)
1999
    if(s->flags & (1ULL << INTERCEPT_IOIO_PROT)) {
2000
        if (s->cc_op != CC_OP_DYNAMIC)
2001
            gen_op_set_cc_op(s->cc_op);
2002
        SVM_movq_T1_im(s->pc - s->cs_base);
2003
        gen_jmp_im(pc_start - s->cs_base);
2004
        gen_op_geneflags();
2005
        gen_op_svm_check_intercept_io((uint32_t)(type >> 32), (uint32_t)type);
2006
        s->cc_op = CC_OP_DYNAMIC;
2007
        /* FIXME: maybe we could move the io intercept vector to the TB as well
2008
                  so we know if this is an EOB or not ... let's assume it's not
2009
                  for now. */
2010
    }
2011
#endif
2012
    return 0;
2013
}
2014

  
2015 2005
static inline int svm_is_rep(int prefixes)
2016 2006
{
2017 2007
    return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
......
2019 2009

  
2020 2010
static inline int
2021 2011
gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
2022
                              uint64_t type, uint64_t param)
2012
                              uint32_t type, uint64_t param)
2023 2013
{
2024 2014
    if(!(s->flags & (INTERCEPT_SVM_MASK)))
2025 2015
	/* no SVM activated */
......
2029 2019
        case SVM_EXIT_READ_CR0 ... SVM_EXIT_EXCP_BASE - 1:
2030 2020
            if (s->cc_op != CC_OP_DYNAMIC) {
2031 2021
                gen_op_set_cc_op(s->cc_op);
2032
                s->cc_op = CC_OP_DYNAMIC;
2033 2022
            }
2034 2023
            gen_jmp_im(pc_start - s->cs_base);
2035
            SVM_movq_T1_im(param);
2036
            gen_op_geneflags();
2037
            gen_op_svm_check_intercept_param((uint32_t)(type >> 32), (uint32_t)type);
2024
            tcg_gen_helper_0_2(helper_svm_check_intercept_param, 
2025
                               tcg_const_i32(type), tcg_const_i64(param));
2038 2026
            /* this is a special case as we do not know if the interception occurs
2039 2027
               so we assume there was none */
2040 2028
            return 0;
......
2042 2030
            if(s->flags & (1ULL << INTERCEPT_MSR_PROT)) {
2043 2031
                if (s->cc_op != CC_OP_DYNAMIC) {
2044 2032
                    gen_op_set_cc_op(s->cc_op);
2045
                    s->cc_op = CC_OP_DYNAMIC;
2046 2033
                }
2047 2034
                gen_jmp_im(pc_start - s->cs_base);
2048
                SVM_movq_T1_im(param);
2049
                gen_op_geneflags();
2050
                gen_op_svm_check_intercept_param((uint32_t)(type >> 32), (uint32_t)type);
2035
                tcg_gen_helper_0_2(helper_svm_check_intercept_param,
2036
                                   tcg_const_i32(type), tcg_const_i64(param));
2051 2037
                /* this is a special case as we do not know if the interception occurs
2052 2038
                   so we assume there was none */
2053 2039
                return 0;
......
2057 2043
            if(s->flags & (1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR))) {
2058 2044
                if (s->cc_op != CC_OP_DYNAMIC) {
2059 2045
                    gen_op_set_cc_op(s->cc_op);
2060
		    s->cc_op = CC_OP_EFLAGS;
2061 2046
                }
2062 2047
                gen_jmp_im(pc_start - s->cs_base);
2063
                SVM_movq_T1_im(param);
2064
                gen_op_geneflags();
2065
                gen_op_svm_vmexit(type >> 32, type);
2048
                tcg_gen_helper_0_2(helper_vmexit,
2049
                                   tcg_const_i32(type), tcg_const_i64(param));
2066 2050
                /* we can optimize this one so TBs don't get longer
2067 2051
                   than up to vmexit */
2068 2052
                gen_eob(s);
......
2276 2260
        gen_op_st_T0_A0(ot + s->mem_index);
2277 2261
        if (level) {
2278 2262
            /* XXX: must save state */
2279
            tcg_gen_helper_0_2(helper_enter64_level,
2263
            tcg_gen_helper_0_3(helper_enter64_level,
2280 2264
                               tcg_const_i32(level),
2281
                               tcg_const_i32((ot == OT_QUAD)));
2265
                               tcg_const_i32((ot == OT_QUAD)),
2266
                               cpu_T[1]);
2282 2267
        }
2283 2268
        gen_op_mov_reg_T1(ot, R_EBP);
2284 2269
        gen_op_addl_T1_im( -esp_addend + (-opsize * level) );
......
2301 2286
        gen_op_st_T0_A0(ot + s->mem_index);
2302 2287
        if (level) {
2303 2288
            /* XXX: must save state */
2304
            tcg_gen_helper_0_2(helper_enter_level,
2289
            tcg_gen_helper_0_3(helper_enter_level,
2305 2290
                               tcg_const_i32(level),
2306
                               tcg_const_i32(s->dflag));
2291
                               tcg_const_i32(s->dflag),
2292
                               cpu_T[1]);
2307 2293
        }
2308 2294
        gen_op_mov_reg_T1(ot, R_EBP);
2309 2295
        gen_op_addl_T1_im( -esp_addend + (-opsize * level) );
......
3208 3194
    } else {
3209 3195
        /* generic MMX or SSE operation */
3210 3196
        switch(b) {
3211
        case 0xf7:
3212
            /* maskmov : we must prepare A0 */
3213
            if (mod != 3)
3214
                goto illegal_op;
3215
#ifdef TARGET_X86_64
3216
            if (s->aflag == 2) {
3217
                gen_op_movq_A0_reg(R_EDI);
3218
            } else
3219
#endif
3220
            {
3221
                gen_op_movl_A0_reg(R_EDI);
3222
                if (s->aflag == 0)
3223
                    gen_op_andl_A0_ffff();
3224
            }
3225
            gen_add_A0_ds_seg(s);
3226
            break;
3227 3197
        case 0x70: /* pshufx insn */
3228 3198
        case 0xc6: /* pshufx insn */
3229 3199
        case 0xc2: /* compare insns */
......
3295 3265
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3296 3266
            tcg_gen_helper_0_2(sse_op2, cpu_ptr0, cpu_ptr1);
3297 3267
            break;
3268
        case 0xf7:
3269
            /* maskmov : we must prepare A0 */
3270
            if (mod != 3)
3271
                goto illegal_op;
3272
#ifdef TARGET_X86_64
3273
            if (s->aflag == 2) {
3274
                gen_op_movq_A0_reg(R_EDI);
3275
            } else
3276
#endif
3277
            {
3278
                gen_op_movl_A0_reg(R_EDI);
3279
                if (s->aflag == 0)
3280
                    gen_op_andl_A0_ffff();
3281
            }
3282
            gen_add_A0_ds_seg(s);
3283

  
3284
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3285
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3286
            tcg_gen_helper_0_3(sse_op2, cpu_ptr0, cpu_ptr1, cpu_A0);
3287
            break;
3298 3288
        default:
3299 3289
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3300 3290
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
......
3440 3430

  
3441 3431
    /* lock generation */
3442 3432
    if (prefixes & PREFIX_LOCK)
3443
        gen_op_lock();
3433
        tcg_gen_helper_0_0(helper_lock);
3444 3434

  
3445 3435
    /* now check op code */
3446 3436
 reswitch:
......
3783 3773
                if (s->cc_op != CC_OP_DYNAMIC)
3784 3774
                    gen_op_set_cc_op(s->cc_op);
3785 3775
                gen_jmp_im(pc_start - s->cs_base);
3786
                gen_op_lcall_protected_T0_T1(dflag, s->pc - pc_start);
3776
                tcg_gen_trunc_tl_i32(cpu_tmp2, cpu_T[0]);
3777
                tcg_gen_helper_0_4(helper_lcall_protected,
3778
                                   cpu_tmp2, cpu_T[1],
3779
                                   tcg_const_i32(dflag), 
3780
                                   tcg_const_i32(s->pc - pc_start));
3787 3781
            } else {
3788
                gen_op_lcall_real_T0_T1(dflag, s->pc - s->cs_base);
3782
                tcg_gen_trunc_tl_i32(cpu_tmp2, cpu_T[0]);
3783
                tcg_gen_helper_0_4(helper_lcall_real,
3784
                                   cpu_tmp2, cpu_T[1],
3785
                                   tcg_const_i32(dflag), 
3786
                                   tcg_const_i32(s->pc - s->cs_base));
3789 3787
            }
3790 3788
            gen_eob(s);
3791 3789
            break;
......
3804 3802
                if (s->cc_op != CC_OP_DYNAMIC)
3805 3803
                    gen_op_set_cc_op(s->cc_op);
3806 3804
                gen_jmp_im(pc_start - s->cs_base);
3807
                gen_op_ljmp_protected_T0_T1(s->pc - pc_start);
3805
                tcg_gen_trunc_tl_i32(cpu_tmp2, cpu_T[0]);
3806
                tcg_gen_helper_0_3(helper_ljmp_protected,
3807
                                   cpu_tmp2,
3808
                                   cpu_T[1],
... This diff was truncated because it exceeds the maximum size that can be displayed.
