Revision 77b2bc2c

b/cpu-exec.c
289 289
#endif
290 290
#if defined(TARGET_I386)
291 291
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
292
                            svm_check_intercept(env, SVM_EXIT_INIT);
292
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
293
                                                          0);
293 294
                            do_cpu_init(x86_env_get_cpu(env));
294 295
                            env->exception_index = EXCP_HALTED;
295 296
                            cpu_loop_exit(env);
......
298 299
                    } else if (env->hflags2 & HF2_GIF_MASK) {
299 300
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
300 301
                            !(env->hflags & HF_SMM_MASK)) {
301
                            svm_check_intercept(env, SVM_EXIT_SMI);
302
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
303
                                                          0);
302 304
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
303 305
                            do_smm_enter(env);
304 306
                            next_tb = 0;
......
319 321
                                     (env->eflags & IF_MASK && 
320 322
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
321 323
                            int intno;
322
                            svm_check_intercept(env, SVM_EXIT_INTR);
324
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
325
                                                          0);
323 326
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
324 327
                            intno = cpu_get_pic_interrupt(env);
325 328
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
......
333 336
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
334 337
                            int intno;
335 338
                            /* FIXME: this should respect TPR */
336
                            svm_check_intercept(env, SVM_EXIT_VINTR);
339
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
340
                                                          0);
337 341
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
338 342
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
339 343
                            do_interrupt_x86_hardirq(env, intno, 1);
b/target-i386/cpu.h
1074 1074
/* op_helper.c */
1075 1075
void do_interrupt(CPUX86State *env);
1076 1076
void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw);
1077
void QEMU_NORETURN raise_exception_env(int exception_index, CPUX86State *nenv);
1078
void QEMU_NORETURN raise_exception_err_env(CPUX86State *nenv, int exception_index,
1079
                                           int error_code);
1077
void QEMU_NORETURN raise_exception(CPUX86State *env, int exception_index);
1078
void QEMU_NORETURN raise_exception_err(CPUX86State *env, int exception_index,
1079
                                       int error_code);
1080 1080

  
1081 1081
void do_smm_enter(CPUX86State *env1);
1082 1082

  
1083
void svm_check_intercept(CPUX86State *env1, uint32_t type);
1083
void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
1084
                                   uint64_t param);
1085
void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1);
1084 1086

  
1085 1087
uint32_t cpu_cc_compute_all(CPUX86State *env1, int op);
1086 1088

  
b/target-i386/helper.c
951 951
        if (env->watchpoint_hit->flags & BP_CPU) {
952 952
            env->watchpoint_hit = NULL;
953 953
            if (check_hw_breakpoints(env, 0))
954
                raise_exception_env(EXCP01_DB, env);
954
                raise_exception(env, EXCP01_DB);
955 955
            else
956 956
                cpu_resume_from_signal(env, NULL);
957 957
        }
......
960 960
            if (bp->pc == env->eip) {
961 961
                if (bp->flags & BP_CPU) {
962 962
                    check_hw_breakpoints(env, 1);
963
                    raise_exception_env(EXCP01_DB, env);
963
                    raise_exception(env, EXCP01_DB);
964 964
                }
965 965
                break;
966 966
            }
b/target-i386/helper.h
63 63
DEF_HELPER_1(mwait, void, int)
64 64
DEF_HELPER_0(debug, void)
65 65
DEF_HELPER_0(reset_rf, void)
66
DEF_HELPER_2(raise_interrupt, void, int, int)
67
DEF_HELPER_1(raise_exception, void, int)
66
DEF_HELPER_3(raise_interrupt, void, env, int, int)
67
DEF_HELPER_2(raise_exception, void, env, int)
68 68
DEF_HELPER_0(cli, void)
69 69
DEF_HELPER_0(sti, void)
70 70
DEF_HELPER_0(set_inhibit_irq, void)
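
With the env type added to these declarations, the DEF_HELPER macros generate helper prototypes that take the CPU state as an explicit first argument, and the translator has to pass cpu_env at each call site (see the translate.c hunk further down). A minimal sketch of the assumed expansion and call shape:

/* assumed expansion of DEF_HELPER_3(raise_interrupt, void, env, int, int) */
void helper_raise_interrupt(CPUX86State *env, int intno, int next_eip_addend);
/* assumed expansion of DEF_HELPER_2(raise_exception, void, env, int) */
void helper_raise_exception(CPUX86State *env, int exception_index);

/* emitted by the translator, as in the translate.c hunk below */
gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));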
b/target-i386/op_helper.c
139 139
}
140 140

  
141 141
#if 0
142
#define raise_exception_err(a, b)                                       \
142
#define raise_exception_err(env, a, b)                                  \
143 143
    do {                                                                \
144 144
        qemu_log("raise_exception line=%d\n", __LINE__);                \
145
        (raise_exception_err)(a, b);                                    \
145
        (raise_exception_err)(env, a, b);                               \
146 146
    } while (0)
147 147
#endif
148 148

  
149
static void QEMU_NORETURN raise_exception_err(int exception_index,
150
                                              int error_code);
151

  
152 149
static const uint8_t parity_table[256] = {
153 150
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
154 151
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
......
317 314
    shift = type >> 3;
318 315
    index = (dpl * 4 + 2) << shift;
319 316
    if (index + (4 << shift) - 1 > env->tr.limit) {
320
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
317
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
321 318
    }
322 319
    if (shift == 0) {
323 320
        *esp_ptr = lduw_kernel(env->tr.base + index);
......
336 333

  
337 334
    if ((selector & 0xfffc) != 0) {
338 335
        if (load_segment(&e1, &e2, selector) != 0) {
339
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
336
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
340 337
        }
341 338
        if (!(e2 & DESC_S_MASK)) {
342
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
339
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
343 340
        }
344 341
        rpl = selector & 3;
345 342
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
346 343
        cpl = env->hflags & HF_CPL_MASK;
347 344
        if (seg_reg == R_CS) {
348 345
            if (!(e2 & DESC_CS_MASK)) {
349
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
346
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
350 347
            }
351 348
            /* XXX: is it correct? */
352 349
            if (dpl != rpl) {
353
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
350
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
354 351
            }
355 352
            if ((e2 & DESC_C_MASK) && dpl > rpl) {
356
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
353
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
357 354
            }
358 355
        } else if (seg_reg == R_SS) {
359 356
            /* SS must be writable data */
360 357
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
361
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
358
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
362 359
            }
363 360
            if (dpl != cpl || dpl != rpl) {
364
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
361
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
365 362
            }
366 363
        } else {
367 364
            /* not readable code */
368 365
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
369
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
366
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
370 367
            }
371 368
            /* if data or non conforming code, checks the rights */
372 369
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
373 370
                if (dpl < cpl || dpl < rpl) {
374
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
371
                    raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
375 372
                }
376 373
            }
377 374
        }
378 375
        if (!(e2 & DESC_P_MASK)) {
379
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
376
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
380 377
        }
381 378
        cpu_x86_load_seg_cache(env, seg_reg, selector,
382 379
                               get_seg_base(e1, e2),
......
384 381
                               e2);
385 382
    } else {
386 383
        if (seg_reg == R_SS || seg_reg == R_CS) {
387
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
384
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
388 385
        }
389 386
    }
390 387
}
......
414 411
    /* if task gate, we read the TSS segment and we load it */
415 412
    if (type == 5) {
416 413
        if (!(e2 & DESC_P_MASK)) {
417
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
414
            raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
418 415
        }
419 416
        tss_selector = e1 >> 16;
420 417
        if (tss_selector & 4) {
421
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
418
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
422 419
        }
423 420
        if (load_segment(&e1, &e2, tss_selector) != 0) {
424
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
421
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
425 422
        }
426 423
        if (e2 & DESC_S_MASK) {
427
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
424
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
428 425
        }
429 426
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
430 427
        if ((type & 7) != 1) {
431
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
428
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
432 429
        }
433 430
    }
434 431

  
435 432
    if (!(e2 & DESC_P_MASK)) {
436
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
433
        raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
437 434
    }
438 435

  
439 436
    if (type & 8) {
......
445 442
    tss_base = get_seg_base(e1, e2);
446 443
    if ((tss_selector & 4) != 0 ||
447 444
        tss_limit < tss_limit_max) {
448
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
445
        raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
449 446
    }
450 447
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
451 448
    if (old_type & 8) {
......
619 616

  
620 617
    /* load the LDT */
621 618
    if (new_ldt & 4) {
622
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
619
        raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
623 620
    }
624 621

  
625 622
    if ((new_ldt & 0xfffc) != 0) {
626 623
        dt = &env->gdt;
627 624
        index = new_ldt & ~7;
628 625
        if ((index + 7) > dt->limit) {
629
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
626
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
630 627
        }
631 628
        ptr = dt->base + index;
632 629
        e1 = ldl_kernel(ptr);
633 630
        e2 = ldl_kernel(ptr + 4);
634 631
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
635
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
632
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
636 633
        }
637 634
        if (!(e2 & DESC_P_MASK)) {
638
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
635
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
639 636
        }
640 637
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
641 638
    }
......
653 650
    /* check that EIP is in the CS segment limits */
654 651
    if (new_eip > env->segs[R_CS].limit) {
655 652
        /* XXX: different exception if CALL? */
656
        raise_exception_err(EXCP0D_GPF, 0);
653
        raise_exception_err(env, EXCP0D_GPF, 0);
657 654
    }
658 655

  
659 656
#ifndef CONFIG_USER_ONLY
......
692 689
    /* all bits must be zero to allow the I/O */
693 690
    if ((val & mask) != 0) {
694 691
    fail:
695
        raise_exception_err(EXCP0D_GPF, 0);
692
        raise_exception_err(env, EXCP0D_GPF, 0);
696 693
    }
697 694
}
698 695

  
......
835 832

  
836 833
    dt = &env->idt;
837 834
    if (intno * 8 + 7 > dt->limit) {
838
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
835
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
839 836
    }
840 837
    ptr = dt->base + intno * 8;
841 838
    e1 = ldl_kernel(ptr);
......
846 843
    case 5: /* task gate */
847 844
        /* must do that check here to return the correct error code */
848 845
        if (!(e2 & DESC_P_MASK)) {
849
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
846
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
850 847
        }
851 848
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
852 849
        if (has_error_code) {
......
877 874
    case 15: /* 386 trap gate */
878 875
        break;
879 876
    default:
880
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
877
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
881 878
        break;
882 879
    }
883 880
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
884 881
    cpl = env->hflags & HF_CPL_MASK;
885 882
    /* check privilege if software int */
886 883
    if (is_int && dpl < cpl) {
887
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
884
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
888 885
    }
889 886
    /* check valid bit */
890 887
    if (!(e2 & DESC_P_MASK)) {
891
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
888
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
892 889
    }
893 890
    selector = e1 >> 16;
894 891
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
895 892
    if ((selector & 0xfffc) == 0) {
896
        raise_exception_err(EXCP0D_GPF, 0);
893
        raise_exception_err(env, EXCP0D_GPF, 0);
897 894
    }
898 895
    if (load_segment(&e1, &e2, selector) != 0) {
899
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
896
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
900 897
    }
901 898
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
902
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
899
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
903 900
    }
904 901
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
905 902
    if (dpl > cpl) {
906
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
903
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
907 904
    }
908 905
    if (!(e2 & DESC_P_MASK)) {
909
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
906
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
910 907
    }
911 908
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
912 909
        /* to inner privilege */
913 910
        get_ss_esp_from_tss(&ss, &esp, dpl);
914 911
        if ((ss & 0xfffc) == 0) {
915
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
912
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
916 913
        }
917 914
        if ((ss & 3) != dpl) {
918
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
915
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
919 916
        }
920 917
        if (load_segment(&ss_e1, &ss_e2, ss) != 0) {
921
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
918
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
922 919
        }
923 920
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
924 921
        if (ss_dpl != dpl) {
925
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
922
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
926 923
        }
927 924
        if (!(ss_e2 & DESC_S_MASK) ||
928 925
            (ss_e2 & DESC_CS_MASK) ||
929 926
            !(ss_e2 & DESC_W_MASK)) {
930
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
927
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
931 928
        }
932 929
        if (!(ss_e2 & DESC_P_MASK)) {
933
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
930
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
934 931
        }
935 932
        new_stack = 1;
936 933
        sp_mask = get_sp_mask(ss_e2);
......
938 935
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
939 936
        /* to same privilege */
940 937
        if (env->eflags & VM_MASK) {
941
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
938
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
942 939
        }
943 940
        new_stack = 0;
944 941
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
......
946 943
        esp = ESP;
947 944
        dpl = cpl;
948 945
    } else {
949
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
946
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
950 947
        new_stack = 0; /* avoid warning */
951 948
        sp_mask = 0; /* avoid warning */
952 949
        ssp = 0; /* avoid warning */
......
1055 1052
    }
1056 1053
    index = 8 * level + 4;
1057 1054
    if ((index + 7) > env->tr.limit) {
1058
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1055
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
1059 1056
    }
1060 1057
    return ldq_kernel(env->tr.base + index);
1061 1058
}
......
1083 1080

  
1084 1081
    dt = &env->idt;
1085 1082
    if (intno * 16 + 15 > dt->limit) {
1086
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1083
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
1087 1084
    }
1088 1085
    ptr = dt->base + intno * 16;
1089 1086
    e1 = ldl_kernel(ptr);
......
1096 1093
    case 15: /* 386 trap gate */
1097 1094
        break;
1098 1095
    default:
1099
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1096
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
1100 1097
        break;
1101 1098
    }
1102 1099
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1103 1100
    cpl = env->hflags & HF_CPL_MASK;
1104 1101
    /* check privilege if software int */
1105 1102
    if (is_int && dpl < cpl) {
1106
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1103
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
1107 1104
    }
1108 1105
    /* check valid bit */
1109 1106
    if (!(e2 & DESC_P_MASK)) {
1110
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1107
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
1111 1108
    }
1112 1109
    selector = e1 >> 16;
1113 1110
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1114 1111
    ist = e2 & 7;
1115 1112
    if ((selector & 0xfffc) == 0) {
1116
        raise_exception_err(EXCP0D_GPF, 0);
1113
        raise_exception_err(env, EXCP0D_GPF, 0);
1117 1114
    }
1118 1115

  
1119 1116
    if (load_segment(&e1, &e2, selector) != 0) {
1120
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1117
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1121 1118
    }
1122 1119
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1123
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1120
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1124 1121
    }
1125 1122
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1126 1123
    if (dpl > cpl) {
1127
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1124
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1128 1125
    }
1129 1126
    if (!(e2 & DESC_P_MASK)) {
1130
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1127
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
1131 1128
    }
1132 1129
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
1133
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1130
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1134 1131
    }
1135 1132
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1136 1133
        /* to inner privilege */
......
1145 1142
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1146 1143
        /* to same privilege */
1147 1144
        if (env->eflags & VM_MASK) {
1148
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1145
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1149 1146
        }
1150 1147
        new_stack = 0;
1151 1148
        if (ist != 0) {
......
1156 1153
        esp &= ~0xfLL; /* align stack */
1157 1154
        dpl = cpl;
1158 1155
    } else {
1159
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1156
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1160 1157
        new_stack = 0; /* avoid warning */
1161 1158
        esp = 0; /* avoid warning */
1162 1159
    }
......
1206 1203
    int selector;
1207 1204

  
1208 1205
    if (!(env->efer & MSR_EFER_SCE)) {
1209
        raise_exception_err(EXCP06_ILLOP, 0);
1206
        raise_exception_err(env, EXCP06_ILLOP, 0);
1210 1207
    }
1211 1208
    selector = (env->star >> 32) & 0xffff;
1212 1209
    if (env->hflags & HF_LMA_MASK) {
......
1263 1260
    int cpl, selector;
1264 1261

  
1265 1262
    if (!(env->efer & MSR_EFER_SCE)) {
1266
        raise_exception_err(EXCP06_ILLOP, 0);
1263
        raise_exception_err(env, EXCP06_ILLOP, 0);
1267 1264
    }
1268 1265
    cpl = env->hflags & HF_CPL_MASK;
1269 1266
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1270
        raise_exception_err(EXCP0D_GPF, 0);
1267
        raise_exception_err(env, EXCP0D_GPF, 0);
1271 1268
    }
1272 1269
    selector = (env->star >> 48) & 0xffff;
1273 1270
    if (env->hflags & HF_LMA_MASK) {
......
1326 1323
    /* real mode (simpler!) */
1327 1324
    dt = &env->idt;
1328 1325
    if (intno * 4 + 3 > dt->limit) {
1329
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1326
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1330 1327
    }
1331 1328
    ptr = dt->base + intno * 4;
1332 1329
    offset = lduw_kernel(ptr);
......
1375 1372
    cpl = env->hflags & HF_CPL_MASK;
1376 1373
    /* check privilege if software int */
1377 1374
    if (is_int && dpl < cpl) {
1378
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1375
        raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
1379 1376
    }
1380 1377

  
1381 1378
    /* Since we emulate only user space, we cannot do more than
......
1540 1537
 * needed. It should only be called, if this is not an interrupt.
1541 1538
 * Returns the new exception number.
1542 1539
 */
1543
static int check_exception(int intno, int *error_code)
1540
static int check_exception(CPUX86State *env, int intno, int *error_code)
1544 1541
{
1545 1542
    int first_contributory = env->old_exception == 0 ||
1546 1543
                              (env->old_exception >= 10 &&
......
1554 1551
#if !defined(CONFIG_USER_ONLY)
1555 1552
    if (env->old_exception == EXCP08_DBLE) {
1556 1553
        if (env->hflags & HF_SVMI_MASK) {
1557
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1554
            cpu_vmexit(env, SVM_EXIT_SHUTDOWN, 0); /* does not return */
1558 1555
        }
1559 1556

  
1560 1557
        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
......
1585 1582
 * EIP value AFTER the interrupt instruction. It is only relevant if
1586 1583
 * is_int is TRUE.
1587 1584
 */
1588
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1589
                                          int next_eip_addend)
1585
static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno,
1586
                                           int is_int, int error_code,
1587
                                           int next_eip_addend)
1590 1588
{
1591 1589
    if (!is_int) {
1592
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno,
1593
                                         error_code);
1594
        intno = check_exception(intno, &error_code);
1590
        cpu_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno,
1591
                                      error_code);
1592
        intno = check_exception(env, intno, &error_code);
1595 1593
    } else {
1596
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1594
        cpu_svm_check_intercept_param(env, SVM_EXIT_SWINT, 0);
1597 1595
    }
1598 1596

  
1599 1597
    env->exception_index = intno;
......
1605 1603

  
1606 1604
/* shortcuts to generate exceptions */
1607 1605

  
1608
static void QEMU_NORETURN raise_exception_err(int exception_index,
1609
                                              int error_code)
1610
{
1611
    raise_interrupt(exception_index, 0, error_code, 0);
1612
}
1613

  
1614
void raise_exception_err_env(CPUX86State *nenv, int exception_index,
1615
                             int error_code)
1606
static void QEMU_NORETURN raise_interrupt(CPUX86State *nenv,
1607
                                          int intno, int is_int,
1608
                                          int error_code,
1609
                                          int next_eip_addend)
1616 1610
{
1617 1611
    env = nenv;
1618
    raise_interrupt(exception_index, 0, error_code, 0);
1612
    raise_interrupt2(env, intno, is_int, error_code, next_eip_addend);
1619 1613
}
1620 1614

  
1621
static void QEMU_NORETURN raise_exception(int exception_index)
1615
void raise_exception_err(CPUX86State *nenv, int exception_index,
1616
                         int error_code)
1622 1617
{
1623
    raise_interrupt(exception_index, 0, 0, 0);
1618
    env = nenv;
1619
    raise_interrupt2(env, exception_index, 0, error_code, 0);
1624 1620
}
1625 1621

  
1626
void raise_exception_env(int exception_index, CPUX86State *nenv)
1622
void raise_exception(CPUX86State *nenv, int exception_index)
1627 1623
{
1628 1624
    env = nenv;
1629
    raise_exception(exception_index);
1625
    raise_interrupt2(env, exception_index, 0, 0, 0);
1630 1626
}
1631 1627
/* SMM support */
1632 1628

  
......
1922 1918
    num = (EAX & 0xffff);
1923 1919
    den = (t0 & 0xff);
1924 1920
    if (den == 0) {
1925
        raise_exception(EXCP00_DIVZ);
1921
        raise_exception(env, EXCP00_DIVZ);
1926 1922
    }
1927 1923
    q = (num / den);
1928 1924
    if (q > 0xff) {
1929
        raise_exception(EXCP00_DIVZ);
1925
        raise_exception(env, EXCP00_DIVZ);
1930 1926
    }
1931 1927
    q &= 0xff;
1932 1928
    r = (num % den) & 0xff;
......
1940 1936
    num = (int16_t)EAX;
1941 1937
    den = (int8_t)t0;
1942 1938
    if (den == 0) {
1943
        raise_exception(EXCP00_DIVZ);
1939
        raise_exception(env, EXCP00_DIVZ);
1944 1940
    }
1945 1941
    q = (num / den);
1946 1942
    if (q != (int8_t)q) {
1947
        raise_exception(EXCP00_DIVZ);
1943
        raise_exception(env, EXCP00_DIVZ);
1948 1944
    }
1949 1945
    q &= 0xff;
1950 1946
    r = (num % den) & 0xff;
......
1958 1954
    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1959 1955
    den = (t0 & 0xffff);
1960 1956
    if (den == 0) {
1961
        raise_exception(EXCP00_DIVZ);
1957
        raise_exception(env, EXCP00_DIVZ);
1962 1958
    }
1963 1959
    q = (num / den);
1964 1960
    if (q > 0xffff) {
1965
        raise_exception(EXCP00_DIVZ);
1961
        raise_exception(env, EXCP00_DIVZ);
1966 1962
    }
1967 1963
    q &= 0xffff;
1968 1964
    r = (num % den) & 0xffff;
......
1977 1973
    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1978 1974
    den = (int16_t)t0;
1979 1975
    if (den == 0) {
1980
        raise_exception(EXCP00_DIVZ);
1976
        raise_exception(env, EXCP00_DIVZ);
1981 1977
    }
1982 1978
    q = (num / den);
1983 1979
    if (q != (int16_t)q) {
1984
        raise_exception(EXCP00_DIVZ);
1980
        raise_exception(env, EXCP00_DIVZ);
1985 1981
    }
1986 1982
    q &= 0xffff;
1987 1983
    r = (num % den) & 0xffff;
......
1997 1993
    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1998 1994
    den = t0;
1999 1995
    if (den == 0) {
2000
        raise_exception(EXCP00_DIVZ);
1996
        raise_exception(env, EXCP00_DIVZ);
2001 1997
    }
2002 1998
    q = (num / den);
2003 1999
    r = (num % den);
2004 2000
    if (q > 0xffffffff) {
2005
        raise_exception(EXCP00_DIVZ);
2001
        raise_exception(env, EXCP00_DIVZ);
2006 2002
    }
2007 2003
    EAX = (uint32_t)q;
2008 2004
    EDX = (uint32_t)r;
......
2016 2012
    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2017 2013
    den = t0;
2018 2014
    if (den == 0) {
2019
        raise_exception(EXCP00_DIVZ);
2015
        raise_exception(env, EXCP00_DIVZ);
2020 2016
    }
2021 2017
    q = (num / den);
2022 2018
    r = (num % den);
2023 2019
    if (q != (int32_t)q) {
2024
        raise_exception(EXCP00_DIVZ);
2020
        raise_exception(env, EXCP00_DIVZ);
2025 2021
    }
2026 2022
    EAX = (uint32_t)q;
2027 2023
    EDX = (uint32_t)r;
......
2164 2160

  
2165 2161
    eflags = helper_cc_compute_all(CC_OP);
2166 2162
    if (eflags & CC_O) {
2167
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2163
        raise_interrupt(env, EXCP04_INTO, 1, 0, next_eip_addend);
2168 2164
    }
2169 2165
}
2170 2166

  
......
2195 2191
    int eflags;
2196 2192

  
2197 2193
    if ((a0 & 0xf) != 0) {
2198
        raise_exception(EXCP0D_GPF);
2194
        raise_exception(env, EXCP0D_GPF);
2199 2195
    }
2200 2196
    eflags = helper_cc_compute_all(CC_OP);
2201 2197
    d0 = ldq(a0);
......
2222 2218
    check_hw_breakpoints(env, 1);
2223 2219
    env->dr[6] |= DR6_BS;
2224 2220
#endif
2225
    raise_exception(EXCP01_DB);
2221
    raise_exception(env, EXCP01_DB);
2226 2222
}
2227 2223

  
2228 2224
void helper_cpuid(void)
......
2316 2312
        env->ldt.limit = 0;
2317 2313
    } else {
2318 2314
        if (selector & 0x4) {
2319
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2315
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2320 2316
        }
2321 2317
        dt = &env->gdt;
2322 2318
        index = selector & ~7;
......
2329 2325
            entry_limit = 7;
2330 2326
        }
2331 2327
        if ((index + entry_limit) > dt->limit) {
2332
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2328
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2333 2329
        }
2334 2330
        ptr = dt->base + index;
2335 2331
        e1 = ldl_kernel(ptr);
2336 2332
        e2 = ldl_kernel(ptr + 4);
2337 2333
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
2338
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2334
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2339 2335
        }
2340 2336
        if (!(e2 & DESC_P_MASK)) {
2341
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2337
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
2342 2338
        }
2343 2339
#ifdef TARGET_X86_64
2344 2340
        if (env->hflags & HF_LMA_MASK) {
......
2371 2367
        env->tr.flags = 0;
2372 2368
    } else {
2373 2369
        if (selector & 0x4) {
2374
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2370
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2375 2371
        }
2376 2372
        dt = &env->gdt;
2377 2373
        index = selector & ~7;
......
2384 2380
            entry_limit = 7;
2385 2381
        }
2386 2382
        if ((index + entry_limit) > dt->limit) {
2387
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2383
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2388 2384
        }
2389 2385
        ptr = dt->base + index;
2390 2386
        e1 = ldl_kernel(ptr);
......
2392 2388
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2393 2389
        if ((e2 & DESC_S_MASK) ||
2394 2390
            (type != 1 && type != 9)) {
2395
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2391
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2396 2392
        }
2397 2393
        if (!(e2 & DESC_P_MASK)) {
2398
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2394
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
2399 2395
        }
2400 2396
#ifdef TARGET_X86_64
2401 2397
        if (env->hflags & HF_LMA_MASK) {
......
2404 2400
            e3 = ldl_kernel(ptr + 8);
2405 2401
            e4 = ldl_kernel(ptr + 12);
2406 2402
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
2407
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2403
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2408 2404
            }
2409 2405
            load_seg_cache_raw_dt(&env->tr, e1, e2);
2410 2406
            env->tr.base |= (target_ulong)e3 << 32;
......
2437 2433
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2438 2434
#endif
2439 2435
            ) {
2440
            raise_exception_err(EXCP0D_GPF, 0);
2436
            raise_exception_err(env, EXCP0D_GPF, 0);
2441 2437
        }
2442 2438
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2443 2439
    } else {
......
2449 2445
        }
2450 2446
        index = selector & ~7;
2451 2447
        if ((index + 7) > dt->limit) {
2452
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2448
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2453 2449
        }
2454 2450
        ptr = dt->base + index;
2455 2451
        e1 = ldl_kernel(ptr);
2456 2452
        e2 = ldl_kernel(ptr + 4);
2457 2453

  
2458 2454
        if (!(e2 & DESC_S_MASK)) {
2459
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2455
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2460 2456
        }
2461 2457
        rpl = selector & 3;
2462 2458
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2463 2459
        if (seg_reg == R_SS) {
2464 2460
            /* must be writable segment */
2465 2461
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
2466
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2462
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2467 2463
            }
2468 2464
            if (rpl != cpl || dpl != cpl) {
2469
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2465
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2470 2466
            }
2471 2467
        } else {
2472 2468
            /* must be readable segment */
2473 2469
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
2474
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2470
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2475 2471
            }
2476 2472

  
2477 2473
            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2478 2474
                /* if not conforming code, test rights */
2479 2475
                if (dpl < cpl || dpl < rpl) {
2480
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2476
                    raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2481 2477
                }
2482 2478
            }
2483 2479
        }
2484 2480

  
2485 2481
        if (!(e2 & DESC_P_MASK)) {
2486 2482
            if (seg_reg == R_SS) {
2487
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2483
                raise_exception_err(env, EXCP0C_STACK, selector & 0xfffc);
2488 2484
            } else {
2489
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2485
                raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
2490 2486
            }
2491 2487
        }
2492 2488

  
......
2516 2512
    target_ulong next_eip;
2517 2513

  
2518 2514
    if ((new_cs & 0xfffc) == 0) {
2519
        raise_exception_err(EXCP0D_GPF, 0);
2515
        raise_exception_err(env, EXCP0D_GPF, 0);
2520 2516
    }
2521 2517
    if (load_segment(&e1, &e2, new_cs) != 0) {
2522
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2518
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2523 2519
    }
2524 2520
    cpl = env->hflags & HF_CPL_MASK;
2525 2521
    if (e2 & DESC_S_MASK) {
2526 2522
        if (!(e2 & DESC_CS_MASK)) {
2527
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2523
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2528 2524
        }
2529 2525
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2530 2526
        if (e2 & DESC_C_MASK) {
2531 2527
            /* conforming code segment */
2532 2528
            if (dpl > cpl) {
2533
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2529
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2534 2530
            }
2535 2531
        } else {
2536 2532
            /* non conforming code segment */
2537 2533
            rpl = new_cs & 3;
2538 2534
            if (rpl > cpl) {
2539
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2535
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2540 2536
            }
2541 2537
            if (dpl != cpl) {
2542
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2538
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2543 2539
            }
2544 2540
        }
2545 2541
        if (!(e2 & DESC_P_MASK)) {
2546
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2542
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
2547 2543
        }
2548 2544
        limit = get_seg_limit(e1, e2);
2549 2545
        if (new_eip > limit &&
2550 2546
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
2551
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2547
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2552 2548
        }
2553 2549
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2554 2550
                       get_seg_base(e1, e2), limit, e2);
......
2564 2560
        case 9: /* 386 TSS */
2565 2561
        case 5: /* task gate */
2566 2562
            if (dpl < cpl || dpl < rpl) {
2567
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2563
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2568 2564
            }
2569 2565
            next_eip = env->eip + next_eip_addend;
2570 2566
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
......
2573 2569
        case 4: /* 286 call gate */
2574 2570
        case 12: /* 386 call gate */
2575 2571
            if ((dpl < cpl) || (dpl < rpl)) {
2576
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2572
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2577 2573
            }
2578 2574
            if (!(e2 & DESC_P_MASK)) {
2579
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2575
                raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
2580 2576
            }
2581 2577
            gate_cs = e1 >> 16;
2582 2578
            new_eip = (e1 & 0xffff);
......
2584 2580
                new_eip |= (e2 & 0xffff0000);
2585 2581
            }
2586 2582
            if (load_segment(&e1, &e2, gate_cs) != 0) {
2587
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2583
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
2588 2584
            }
2589 2585
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2590 2586
            /* must be code segment */
2591 2587
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2592 2588
                 (DESC_S_MASK | DESC_CS_MASK))) {
2593
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2589
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
2594 2590
            }
2595 2591
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2596 2592
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
2597
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2593
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
2598 2594
            }
2599 2595
            if (!(e2 & DESC_P_MASK)) {
2600
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2596
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
2601 2597
            }
2602 2598
            limit = get_seg_limit(e1, e2);
2603 2599
            if (new_eip > limit) {
2604
                raise_exception_err(EXCP0D_GPF, 0);
2600
                raise_exception_err(env, EXCP0D_GPF, 0);
2605 2601
            }
2606 2602
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2607 2603
                                   get_seg_base(e1, e2), limit, e2);
2608 2604
            EIP = new_eip;
2609 2605
            break;
2610 2606
        default:
2611
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2607
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2612 2608
            break;
2613 2609
        }
2614 2610
    }
......
2654 2650
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2655 2651
    LOG_PCALL_STATE(env);
2656 2652
    if ((new_cs & 0xfffc) == 0) {
2657
        raise_exception_err(EXCP0D_GPF, 0);
2653
        raise_exception_err(env, EXCP0D_GPF, 0);
2658 2654
    }
2659 2655
    if (load_segment(&e1, &e2, new_cs) != 0) {
2660
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2656
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2661 2657
    }
2662 2658
    cpl = env->hflags & HF_CPL_MASK;
2663 2659
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2664 2660
    if (e2 & DESC_S_MASK) {
2665 2661
        if (!(e2 & DESC_CS_MASK)) {
2666
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2662
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2667 2663
        }
2668 2664
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2669 2665
        if (e2 & DESC_C_MASK) {
2670 2666
            /* conforming code segment */
2671 2667
            if (dpl > cpl) {
2672
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2668
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2673 2669
            }
2674 2670
        } else {
2675 2671
            /* non conforming code segment */
2676 2672
            rpl = new_cs & 3;
2677 2673
            if (rpl > cpl) {
2678
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2674
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2679 2675
            }
2680 2676
            if (dpl != cpl) {
2681
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2677
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2682 2678
            }
2683 2679
        }
2684 2680
        if (!(e2 & DESC_P_MASK)) {
2685
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2681
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
2686 2682
        }
2687 2683

  
2688 2684
#ifdef TARGET_X86_64
......
2716 2712

  
2717 2713
            limit = get_seg_limit(e1, e2);
2718 2714
            if (new_eip > limit) {
2719
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2715
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2720 2716
            }
2721 2717
            /* from this point, not restartable */
2722 2718
            SET_ESP(sp, sp_mask);
......
2734 2730
        case 9: /* available 386 TSS */
2735 2731
        case 5: /* task gate */
2736 2732
            if (dpl < cpl || dpl < rpl) {
2737
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2733
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2738 2734
            }
2739 2735
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2740 2736
            CC_OP = CC_OP_EFLAGS;
......
2743 2739
        case 12: /* 386 call gate */
2744 2740
            break;
2745 2741
        default:
2746
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2742
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2747 2743
            break;
2748 2744
        }
2749 2745
        shift = type >> 3;
2750 2746

  
2751 2747
        if (dpl < cpl || dpl < rpl) {
2752
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2748
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2753 2749
        }
2754 2750
        /* check valid bit */
2755 2751
        if (!(e2 & DESC_P_MASK)) {
2756
            raise_exception_err(EXCP0B_NOSEG,  new_cs & 0xfffc);
2752
            raise_exception_err(env, EXCP0B_NOSEG,  new_cs & 0xfffc);
2757 2753
        }
2758 2754
        selector = e1 >> 16;
2759 2755
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2760 2756
        param_count = e2 & 0x1f;
2761 2757
        if ((selector & 0xfffc) == 0) {
2762
            raise_exception_err(EXCP0D_GPF, 0);
2758
            raise_exception_err(env, EXCP0D_GPF, 0);
2763 2759
        }
2764 2760

  
2765 2761
        if (load_segment(&e1, &e2, selector) != 0) {
2766
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2762
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2767 2763
        }
2768 2764
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
2769
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2765
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2770 2766
        }
2771 2767
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2772 2768
        if (dpl > cpl) {
2773
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2769
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2774 2770
        }
2775 2771
        if (!(e2 & DESC_P_MASK)) {
2776
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2772
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
2777 2773
        }
2778 2774

  
2779 2775
        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
......
2783 2779
                      "\n",
2784 2780
                      ss, sp, param_count, ESP);
2785 2781
            if ((ss & 0xfffc) == 0) {
2786
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2782
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
2787 2783
            }
2788 2784
            if ((ss & 3) != dpl) {
2789
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2785
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
2790 2786
            }
2791 2787
            if (load_segment(&ss_e1, &ss_e2, ss) != 0) {
2792
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2788
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
2793 2789
            }
2794 2790
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2795 2791
            if (ss_dpl != dpl) {
2796
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2792
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
2797 2793
            }
2798 2794
            if (!(ss_e2 & DESC_S_MASK) ||
2799 2795
                (ss_e2 & DESC_CS_MASK) ||
2800 2796
                !(ss_e2 & DESC_W_MASK)) {
2801
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2797
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
2802 2798
            }
2803 2799
            if (!(ss_e2 & DESC_P_MASK)) {
2804
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2800
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
2805 2801
            }
2806 2802

  
2807 2803
            /* push_size = ((param_count * 2) + 8) << shift; */
......
2983 2979
              new_cs, new_eip, shift, addend);
2984 2980
    LOG_PCALL_STATE(env);
2985 2981
    if ((new_cs & 0xfffc) == 0) {
2986
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2982
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2987 2983
    }
2988 2984
    if (load_segment(&e1, &e2, new_cs) != 0) {
2989
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2985
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2990 2986
    }
2991 2987
    if (!(e2 & DESC_S_MASK) ||
2992 2988
        !(e2 & DESC_CS_MASK)) {
2993
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2989
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2994 2990
    }
2995 2991
    cpl = env->hflags & HF_CPL_MASK;
2996 2992
    rpl = new_cs & 3;
2997 2993
    if (rpl < cpl) {
2998
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2994
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2999 2995
    }
3000 2996
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3001 2997
    if (e2 & DESC_C_MASK) {
3002 2998
        if (dpl > rpl) {
3003
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2999
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
3004 3000
        }
3005 3001
    } else {
3006 3002
        if (dpl != rpl) {
3007
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3003
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
3008 3004
        }
3009 3005
    }
3010 3006
    if (!(e2 & DESC_P_MASK)) {
3011
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3007
        raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
3012 3008
    }
3013 3009

  
3014 3010
    sp += addend;
......
3056 3052
            } else
3057 3053
#endif
3058 3054
            {
3059
                raise_exception_err(EXCP0D_GPF, 0);
3055
                raise_exception_err(env, EXCP0D_GPF, 0);
3060 3056
            }
3061 3057
        } else {
3062 3058
            if ((new_ss & 3) != rpl) {
3063
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3059
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
3064 3060
            }
3065 3061
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0) {
3066
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3062
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
3067 3063
            }
3068 3064
            if (!(ss_e2 & DESC_S_MASK) ||
3069 3065
                (ss_e2 & DESC_CS_MASK) ||
3070 3066
                !(ss_e2 & DESC_W_MASK)) {
3071
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3067
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
3072 3068
            }
3073 3069
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3074 3070
            if (dpl != rpl) {
3075
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3071
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
3076 3072
            }
3077 3073
            if (!(ss_e2 & DESC_P_MASK)) {
3078
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3074
                raise_exception_err(env, EXCP0B_NOSEG, new_ss & 0xfffc);
3079 3075
            }
3080 3076
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
3081 3077
                                   get_seg_base(ss_e1, ss_e2),
......
3157 3153
    if (env->eflags & NT_MASK) {
3158 3154
#ifdef TARGET_X86_64
3159 3155
        if (env->hflags & HF_LMA_MASK) {
3160
            raise_exception_err(EXCP0D_GPF, 0);
3156
            raise_exception_err(env, EXCP0D_GPF, 0);
3161 3157
        }
3162 3158
#endif
3163 3159
        tss_selector = lduw_kernel(env->tr.base + 0);
3164 3160
        if (tss_selector & 4) {
3165
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3161
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
3166 3162
        }
3167 3163
        if (load_segment(&e1, &e2, tss_selector) != 0) {
3168
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3164
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
3169 3165
        }
3170 3166
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3171 3167
        /* NOTE: we check both segment and busy TSS */
3172 3168
        if (type != 3) {
3173
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3169
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
3174 3170
        }
3175 3171
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3176 3172
    } else {
......
3187 3183
void helper_sysenter(void)
3188 3184
{
3189 3185
    if (env->sysenter_cs == 0) {
3190
        raise_exception_err(EXCP0D_GPF, 0);
3186
        raise_exception_err(env, EXCP0D_GPF, 0);
3191 3187
    }
3192 3188
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3193 3189
    cpu_x86_set_cpl(env, 0);
......
3224 3220

  
3225 3221
    cpl = env->hflags & HF_CPL_MASK;
3226 3222
    if (env->sysenter_cs == 0 || cpl != 0) {
3227
        raise_exception_err(EXCP0D_GPF, 0);
3223
        raise_exception_err(env, EXCP0D_GPF, 0);
3228 3224
    }
3229 3225
    cpu_x86_set_cpl(env, 3);
3230 3226
#ifdef TARGET_X86_64
......
3364 3360
    uint64_t val;
3365 3361

  
3366 3362
    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3367
        raise_exception(EXCP0D_GPF);
3363
        raise_exception(env, EXCP0D_GPF);
3368 3364
    }
3369 3365
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3370 3366

  
......
3382 3378
void helper_rdpmc(void)
3383 3379
{
3384 3380
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3385
        raise_exception(EXCP0D_GPF);
3381
        raise_exception(env, EXCP0D_GPF);
3386 3382
    }
3387 3383
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3388 3384

  
3389 3385
    /* currently unimplemented */
3390 3386
    qemu_log_mask(LOG_UNIMP, "x86: unimplemented rdpmc\n");
3391
    raise_exception_err(EXCP06_ILLOP, 0);
3387
    raise_exception_err(env, EXCP06_ILLOP, 0);
3392 3388
}
3393 3389

  
3394 3390
#if defined(CONFIG_USER_ONLY)
......
3900 3896
static void fpu_raise_exception(void)
3901 3897
{
3902 3898
    if (env->cr[0] & CR0_NE_MASK) {
3903
        raise_exception(EXCP10_COPR);
3899
        raise_exception(env, EXCP10_COPR);
3904 3900
    }
3905 3901
#if !defined(CONFIG_USER_ONLY)
3906 3902
    else {
......
4894 4890

  
4895 4891
    /* The operand must be 16 byte aligned */
4896 4892
    if (ptr & 0xf) {
4897
        raise_exception(EXCP0D_GPF);
4893
        raise_exception(env, EXCP0D_GPF);
4898 4894
    }
4899 4895

  
4900 4896
    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
......
4956 4952

  
4957 4953
    /* The operand must be 16 byte aligned */
4958 4954
    if (ptr & 0xf) {
4959
        raise_exception(EXCP0D_GPF);
4955
        raise_exception(env, EXCP0D_GPF);
4960 4956
    }
4961 4957

  
4962 4958
    env->fpuc = lduw(ptr);
......
5144 5140
    uint64_t r0, r1;
5145 5141

  
5146 5142
    if (t0 == 0) {
5147
        raise_exception(EXCP00_DIVZ);
5143
        raise_exception(env, EXCP00_DIVZ);
5148 5144
    }
5149 5145
    r0 = EAX;
5150 5146
    r1 = EDX;
5151 5147
    if (div64(&r0, &r1, t0)) {
5152
        raise_exception(EXCP00_DIVZ);
5148
        raise_exception(env, EXCP00_DIVZ);
5153 5149
    }
5154 5150
    EAX = r0;
5155 5151
    EDX = r1;
......
5160 5156
    uint64_t r0, r1;
5161 5157

  
5162 5158
    if (t0 == 0) {
5163
        raise_exception(EXCP00_DIVZ);
5159
        raise_exception(env, EXCP00_DIVZ);
5164 5160
    }
5165 5161
    r0 = EAX;
5166 5162
    r1 = EDX;
5167 5163
    if (idiv64(&r0, &r1, t0)) {
5168
        raise_exception(EXCP00_DIVZ);
5164
        raise_exception(env, EXCP00_DIVZ);
5169 5165
    }
5170 5166
    EAX = r0;
5171 5167
    EDX = r1;
......
5191 5187
void helper_monitor(target_ulong ptr)
5192 5188
{
5193 5189
    if ((uint32_t)ECX != 0) {
5194
        raise_exception(EXCP0D_GPF);
5190
        raise_exception(env, EXCP0D_GPF);
5195 5191
    }
5196 5192
    /* XXX: store address? */
5197 5193
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
......
5200 5196
void helper_mwait(int next_eip_addend)
5201 5197
{
5202 5198
    if ((uint32_t)ECX != 0) {
5203
        raise_exception(EXCP0D_GPF);
5199
        raise_exception(env, EXCP0D_GPF);
5204 5200
    }
5205 5201
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5206 5202
    EIP += next_eip_addend;
......
5225 5221
    env->eflags &= ~RF_MASK;
5226 5222
}
5227 5223

  
5228
void helper_raise_interrupt(int intno, int next_eip_addend)
5224
void helper_raise_interrupt(CPUX86State *env, int intno, int next_eip_addend)
5229 5225
{
5230
    raise_interrupt(intno, 1, 0, next_eip_addend);
5226
    raise_interrupt(env, intno, 1, 0, next_eip_addend);
5231 5227
}
5232 5228

  
5233
void helper_raise_exception(int exception_index)
5229
void helper_raise_exception(CPUX86State *env, int exception_index)
5234 5230
{
5235
    raise_exception(exception_index);
5231
    raise_exception(env, exception_index);
5236 5232
}
5237 5233

  
5238 5234
void helper_cli(void)
......
5256 5252
{
5257 5253
    env->eflags |= VIF_MASK;
5258 5254
    if (env->eflags & VIP_MASK) {
5259
        raise_exception(EXCP0D_GPF);
5255
        raise_exception(env, EXCP0D_GPF);
5260 5256
    }
5261 5257
}
5262 5258
#endif
......
5279 5275
    high = ldsw(a0 + 2);
5280 5276
    v = (int16_t)v;
5281 5277
    if (v < low || v > high) {
5282
        raise_exception(EXCP05_BOUND);
5278
        raise_exception(env, EXCP05_BOUND);
5283 5279
    }
5284 5280
}
5285 5281

  
......
5290 5286
    low = ldl(a0);
5291 5287
    high = ldl(a0 + 4);
5292 5288
    if (v < low || v > high) {
5293
        raise_exception(EXCP05_BOUND);
5289
        raise_exception(env, EXCP05_BOUND);
5294 5290
    }
5295 5291
}
5296 5292

  
......
5338 5334
                cpu_restore_state(tb, env, retaddr);
5339 5335
            }
5340 5336
        }
5341
        raise_exception_err(env->exception_index, env->error_code);
5337
        raise_exception_err(env, env->exception_index, env->error_code);
5342 5338
    }
5343 5339
    env = saved_env;
5344 5340
}
......
5384 5380
{
5385 5381
}
5386 5382

  
5383
void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1)
5384
{
5385
}
5386

  
5387 5387
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
5388 5388
{
5389 5389
}
5390 5390

  
5391
void svm_check_intercept(CPUX86State *env1, uint32_t type)
5391
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
5392
                                   uint64_t param)
5392 5393
{
5393 5394
}
5394 5395

  
......
5605 5606
            env->exception_next_eip = -1;
5606 5607
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
5607 5608
            /* XXX: is it always correct? */
5608
            do_interrupt_all(vector, 0, 0, 0, 1);
5609
            do_interrupt_x86_hardirq(env, vector, 1);
5609 5610
            break;
5610 5611
        case SVM_EVTINJ_TYPE_NMI:
5611 5612
            env->exception_index = EXCP02_NMI;
......
5640 5641
void helper_vmmcall(void)
5641 5642
{
5642 5643
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
5643
    raise_exception(EXCP06_ILLOP);
5644
    raise_exception(env, EXCP06_ILLOP);
5644 5645
}
5645 5646

  
5646 5647
void helper_vmload(int aflag)
......
5741 5742
{
5742 5743
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
5743 5744
    /* XXX: not implemented */
5744
    raise_exception(EXCP06_ILLOP);
5745
    raise_exception(env, EXCP06_ILLOP);
5745 5746
}
5746 5747

  
5747 5748
void helper_invlpga(int aflag)
......
5834 5835
    }
5835 5836
}
5836 5837

  
5837
void svm_check_intercept(CPUX86State *env1, uint32_t type)
5838
void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
5839
                                   uint64_t param)
5838 5840
{
5839 5841
    CPUX86State *saved_env;
5840 5842

  
5841 5843
    saved_env = env;
5842 5844
    env = env1;
5843
    helper_svm_check_intercept_param(type, 0);
5845
    helper_svm_check_intercept_param(type, param);
5844 5846
    env = saved_env;
5845 5847
}
5846 5848

  
......
6022 6024
    cpu_loop_exit(env);
6023 6025
}
6024 6026

  
6027
void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1)
6028
{
6029
    env = nenv;
6030
    helper_vmexit(exit_code, exit_info_1);
6031
}
6032

  
6025 6033
#endif
6026 6034

  
6027 6035
/* MMX/SSE */
b/target-i386/translate.c
2659 2659
    if (s->cc_op != CC_OP_DYNAMIC)
2660 2660
        gen_op_set_cc_op(s->cc_op);
2661 2661
    gen_jmp_im(cur_eip);
2662
    gen_helper_raise_exception(tcg_const_i32(trapno));
2662
    gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
2663 2663
    s->is_jmp = DISAS_TB_JUMP;
2664 2664
}
2665 2665

  
......
2671 2671
    if (s->cc_op != CC_OP_DYNAMIC)
2672 2672
        gen_op_set_cc_op(s->cc_op);
2673 2673
    gen_jmp_im(cur_eip);
2674
    gen_helper_raise_interrupt(tcg_const_i32(intno), 
2674
    gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
2675 2675
                               tcg_const_i32(next_eip - cur_eip));
2676 2676
    s->is_jmp = DISAS_TB_JUMP;
2677 2677
}
b/user-exec.c
41 41
static void exception_action(CPUArchState *env1)
42 42
{
43 43
#if defined(TARGET_I386)
44
    raise_exception_err_env(env1, env1->exception_index, env1->error_code);
44
    raise_exception_err(env1, env1->exception_index, env1->error_code);
45 45
#else
46 46
    cpu_loop_exit(env1);
47 47
#endif
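
In sum, this revision changes the x86 exception and SVM helper entry points to take the CPU state explicitly instead of relying on the global env: raise_exception_env()/raise_exception_err_env() become raise_exception()/raise_exception_err() with a leading CPUX86State argument, svm_check_intercept() becomes cpu_svm_check_intercept_param(), and a cpu_vmexit() wrapper is exported. A minimal sketch of the resulting interface, as declared in the cpu.h hunk above:

void QEMU_NORETURN raise_exception(CPUX86State *env, int exception_index);
void QEMU_NORETURN raise_exception_err(CPUX86State *env, int exception_index,
                                       int error_code);
void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
                                   uint64_t param);
void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1);

/* callers simply forward the CPU they were handed, e.g. in user-exec.c: */
raise_exception_err(env1, env1->exception_index, env1->error_code);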
