Revision 2c1794c4 helper-i386.c

--- a/helper-i386.c
+++ b/helper-i386.c
@@ -185,7 +185,7 @@
 
 /* protected mode interrupt */
 static void do_interrupt_protected(int intno, int is_int, int error_code,
-                                       unsigned int next_eip)
+                                   unsigned int next_eip)
 {
     SegmentCache *dt;
     uint8_t *ptr, *ssp;
@@ -378,20 +378,19 @@
     ptr = dt->base + intno * 4;
     offset = lduw(ptr);
     selector = lduw(ptr + 2);
-    esp = env->regs[R_ESP] & 0xffff;
-    ssp = env->segs[R_SS].base + esp;
+    esp = env->regs[R_ESP];
+    ssp = env->segs[R_SS].base;
     if (is_int)
         old_eip = next_eip;
     else
         old_eip = env->eip;
     old_cs = env->segs[R_CS].selector;
-    ssp -= 2;
-    stw(ssp, compute_eflags());
-    ssp -= 2;
-    stw(ssp, old_cs);
-    ssp -= 2;
-    stw(ssp, old_eip);
-    esp -= 6;
+    esp -= 2;
+    stw(ssp + (esp & 0xffff), compute_eflags());
+    esp -= 2;
+    stw(ssp + (esp & 0xffff), old_cs);
+    esp -= 2;
+    stw(ssp + (esp & 0xffff), old_eip);
 
     /* update processor state */
     env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
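
Note: the real-mode interrupt path now keeps esp as the working copy and
masks it to 16 bits at each store, instead of pre-decrementing a flat ssp
pointer, so pushes wrap correctly inside the 64 KB stack segment before the
final merge back into ESP. A minimal standalone sketch of the idea
(hypothetical names, not code from this revision):

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t seg[0x10000];                /* 64 KB stack segment image */

    static void push16(uint32_t *esp, uint16_t val)
    {
        *esp -= 2;                              /* decrement the full copy */
        uint32_t off = *esp & 0xffff;           /* but address with SP only */
        seg[off] = val & 0xff;                  /* little-endian store */
        seg[(off + 1) & 0xffff] = val >> 8;
    }

    int main(void)
    {
        uint32_t esp = 0;                       /* SP = 0: the push must wrap */
        push16(&esp, 0xbeef);
        printf("SP = 0x%04x\n", esp & 0xffff);  /* prints SP = 0xfffe */
        return 0;
    }
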
@@ -733,11 +732,47 @@
 }
 
 /* protected mode jump */
-void jmp_seg(int selector, unsigned int new_eip)
+void helper_ljmp_protected_T0_T1(void)
 {
+    int new_cs, new_eip;
     SegmentCache sc1;
     uint32_t e1, e2, cpl, dpl, rpl;
 
-    if ((selector & 0xfffc) == 0) {
+    new_cs = T0;
+    new_eip = T1;
+    if ((new_cs & 0xfffc) == 0)
         raise_exception_err(EXCP0D_GPF, 0);
+    if (load_segment(&e1, &e2, new_cs) != 0)
+        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+    cpl = env->segs[R_CS].selector & 3;
+    if (e2 & DESC_S_MASK) {
+        if (!(e2 & DESC_CS_MASK))
+            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+        if (e2 & DESC_CS_MASK) {
+            /* conforming code segment */
+            if (dpl > cpl)
+                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+        } else {
+            /* non conforming code segment */
+            rpl = new_cs & 3;
+            if (rpl > cpl)
+                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+            if (dpl != cpl)
+                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+        }
+        if (!(e2 & DESC_P_MASK))
+            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
+        load_seg_cache(&sc1, e1, e2);
+        if (new_eip > sc1.limit)
+            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+        env->segs[R_CS].base = sc1.base;
+        env->segs[R_CS].limit = sc1.limit;
+        env->segs[R_CS].flags = sc1.flags;
+        env->segs[R_CS].selector = (new_cs & 0xfffc) | cpl;
+        EIP = new_eip;
+    } else {
+        cpu_abort(env, "jmp to call/task gate not supported 0x%04x:0x%08x", 
+                  new_cs, new_eip);
     }
+}
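
Note: jmp_seg becomes helper_ljmp_protected_T0_T1, taking the target
selector and offset from the T0/T1 temporaries used by the translated code
rather than from C arguments. The privilege rules it enforces for a direct
far jump can be restated as a small predicate (hypothetical standalone
sketch, not part of this revision):

    #include <stdbool.h>

    /* cpl: current privilege level; dpl: target descriptor privilege;
       rpl: low two bits of the target selector */
    static bool far_jump_allowed(unsigned cpl, unsigned rpl,
                                 unsigned dpl, bool conforming)
    {
        if (conforming)
            return dpl <= cpl;                /* equal or more privileged */
        return rpl <= cpl && dpl == cpl;      /* exact privilege match */
    }
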
@@ -744,36 +779,228 @@
 
-    if (load_segment(&e1, &e2, selector) != 0)
-        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+/* real mode call */
+void helper_lcall_real_T0_T1(int shift, int next_eip)
+{
+    int new_cs, new_eip;
+    uint32_t esp, esp_mask;
+    uint8_t *ssp;
+
+    new_cs = T0;
+    new_eip = T1;
+    esp = env->regs[R_ESP];
+    esp_mask = 0xffffffff;
+    if (!(env->segs[R_SS].flags & DESC_B_MASK))
+        esp_mask = 0xffff;
+    ssp = env->segs[R_SS].base;
+    if (shift) {
+        esp -= 4;
+        stl(ssp + (esp & esp_mask), env->segs[R_CS].selector);
+        esp -= 4;
+        stl(ssp + (esp & esp_mask), next_eip);
+    } else {
+        esp -= 2;
+        stw(ssp + (esp & esp_mask), env->segs[R_CS].selector);
+        esp -= 2;
+        stw(ssp + (esp & esp_mask), next_eip);
+    }
+
+    if (!(env->segs[R_SS].flags & DESC_B_MASK))
+        env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
+    else
+        env->regs[R_ESP] = esp;
+    env->eip = new_eip;
+    env->segs[R_CS].selector = new_cs;
+    env->segs[R_CS].base = (uint8_t *)(new_cs << 4);
+}
+
+/* protected mode call */
+void helper_lcall_protected_T0_T1(int shift, int next_eip)
+{
+    int new_cs, new_eip;
+    SegmentCache sc1;
+    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
+    uint32_t ss, ss_e1, ss_e2, push_size, sp, type, ss_dpl;
+    uint32_t old_ss, old_esp, val, i;
+    uint8_t *ssp, *old_ssp;
+
+    new_cs = T0;
+    new_eip = T1;
+    if ((new_cs & 0xfffc) == 0)
+        raise_exception_err(EXCP0D_GPF, 0);
+    if (load_segment(&e1, &e2, new_cs) != 0)
+        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
     cpl = env->segs[R_CS].selector & 3;
     if (e2 & DESC_S_MASK) {
         if (!(e2 & DESC_CS_MASK))
-            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
         if (e2 & DESC_CS_MASK) {
             /* conforming code segment */
             if (dpl > cpl)
-                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
         } else {
             /* non conforming code segment */
-            rpl = selector & 3;
+            rpl = new_cs & 3;
             if (rpl > cpl)
-                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
             if (dpl != cpl)
-                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
         }
         if (!(e2 & DESC_P_MASK))
-            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
+            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
+
+        sp = env->regs[R_ESP];
+        if (!(env->segs[R_SS].flags & DESC_B_MASK))
+            sp &= 0xffff;
+        ssp = env->segs[R_SS].base + sp;
+        if (shift) {
+            ssp -= 4;
+            stl(ssp, env->segs[R_CS].selector);
+            ssp -= 4;
+            stl(ssp, next_eip);
+        } else {
+            ssp -= 2;
+            stw(ssp, env->segs[R_CS].selector);
+            ssp -= 2;
+            stw(ssp, next_eip);
+        }
+        sp -= (4 << shift);
+
         load_seg_cache(&sc1, e1, e2);
         if (new_eip > sc1.limit)
-            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+        /* from this point, not restartable */
+        if (!(env->segs[R_SS].flags & DESC_B_MASK))
+            env->regs[R_ESP] = (env->regs[R_ESP] & 0xffff0000) | (sp & 0xffff);
+        else
+            env->regs[R_ESP] = sp;
         env->segs[R_CS].base = sc1.base;
         env->segs[R_CS].limit = sc1.limit;
         env->segs[R_CS].flags = sc1.flags;
-        env->segs[R_CS].selector = (selector & 0xfffc) | cpl;
+        env->segs[R_CS].selector = (new_cs & 0xfffc) | cpl;
         EIP = new_eip;
     } else {
-        cpu_abort(env, "jmp to call/task gate not supported 0x%04x:0x%08x", 
-                  selector, new_eip);
+        /* check gate type */
+        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
+        switch(type) {
+        case 1: /* available 286 TSS */
+        case 9: /* available 386 TSS */
+        case 5: /* task gate */
+            cpu_abort(env, "task gate not supported");
+            break;
+        case 4: /* 286 call gate */
+        case 12: /* 386 call gate */
+            break;
+        default:
+            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+            break;
+        }
+        shift = type >> 3;
+
+        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+        rpl = new_cs & 3;
+        if (dpl < cpl || dpl < rpl)
+            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+        /* check valid bit */
+        if (!(e2 & DESC_P_MASK))
+            raise_exception_err(EXCP0B_NOSEG,  new_cs & 0xfffc);
+        selector = e1 >> 16;
+        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
+        if ((selector & 0xfffc) == 0)
+            raise_exception_err(EXCP0D_GPF, 0);
+
+        if (load_segment(&e1, &e2, selector) != 0)
+            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
+            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+        if (dpl > cpl)
+            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+        if (!(e2 & DESC_P_MASK))
+            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
+
+        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
+            /* to inner priviledge */
+            get_ss_esp_from_tss(&ss, &sp, dpl);
+            if ((ss & 0xfffc) == 0)
+                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+            if ((ss & 3) != dpl)
+                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
+                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
+            if (ss_dpl != dpl)
+                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+            if (!(ss_e2 & DESC_S_MASK) ||
+                (ss_e2 & DESC_CS_MASK) ||
+                !(ss_e2 & DESC_W_MASK))
+                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+            if (!(ss_e2 & DESC_P_MASK))
+                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+
+            param_count = e2 & 0x1f;
+            push_size = ((param_count * 2) + 8) << shift;
+
+            old_esp = env->regs[R_ESP];
+            old_ss = env->segs[R_SS].selector;
+            if (!(env->segs[R_SS].flags & DESC_B_MASK))
+                old_esp &= 0xffff;
+            old_ssp = env->segs[R_SS].base + old_esp;
+
+            /* XXX: from this point not restartable */
+            load_seg(R_SS, ss, env->eip);
+
+            if (!(env->segs[R_SS].flags & DESC_B_MASK))
+                sp &= 0xffff;
+            ssp = env->segs[R_SS].base + sp;
+            if (shift) {
+                ssp -= 4;
+                stl(ssp, old_ss);
+                ssp -= 4;
+                stl(ssp, old_esp);
+                ssp -= 4 * param_count;
+                for(i = 0; i < param_count; i++) {
+                    val = ldl(old_ssp + i * 4);
+                    stl(ssp + i * 4, val);
+                }
+            } else {
+                ssp -= 2;
+                stw(ssp, old_ss);
+                ssp -= 2;
+                stw(ssp, old_esp);
+                ssp -= 2 * param_count;
+                for(i = 0; i < param_count; i++) {
+                    val = lduw(old_ssp + i * 2);
+                    stw(ssp + i * 2, val);
+                }
+            }
+        } else {
+            /* to same priviledge */
+            if (!(env->segs[R_SS].flags & DESC_B_MASK))
+                sp &= 0xffff;
+            ssp = env->segs[R_SS].base + sp;
+            push_size = (4 << shift);
+        }
+
+        if (shift) {
+            ssp -= 4;
+            stl(ssp, env->segs[R_CS].selector);
+            ssp -= 4;
+            stl(ssp, next_eip);
+        } else {
+            ssp -= 2;
+            stw(ssp, env->segs[R_CS].selector);
+            ssp -= 2;
+            stw(ssp, next_eip);
+        }
+
+        sp -= push_size;
+        load_seg(R_CS, selector, env->eip);
+        /* from this point, not restartable if same priviledge */
+        if (!(env->segs[R_SS].flags & DESC_B_MASK))
+            env->regs[R_ESP] = (env->regs[R_ESP] & 0xffff0000) | (sp & 0xffff);
+        else
+            env->regs[R_ESP] = sp;
+        EIP = offset;
     }
 }
 
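
Note: helper_lcall_protected_T0_T1 adds call-gate dispatch: the gate entry
supplies the real target (selector = e1 >> 16, offset from e1/e2), shift =
type >> 3 selects 16-bit (286) or 32-bit (386) gate semantics, and a call to
an inner privilege level switches to the SS:ESP taken from the TSS, pushes
the old SS:ESP, and copies param_count words of arguments across. A compact
sketch of that copy for the 32-bit case (hypothetical function, not code
from this revision):

    #include <stdint.h>
    #include <string.h>

    /* new_top points one past the top of the inner stack; old_args points
       at the caller's parameters. Push order: old SS, old ESP, parameters. */
    static uint32_t *switch_stack32(uint32_t *new_top, const uint32_t *old_args,
                                    unsigned param_count,
                                    uint32_t old_ss, uint32_t old_esp)
    {
        *--new_top = old_ss;
        *--new_top = old_esp;
        new_top -= param_count;
        memcpy(new_top, old_args, param_count * sizeof(uint32_t));
        return new_top;                        /* new inner stack pointer */
    }
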
@@ -820,7 +1047,7 @@
 }
 
 /* protected mode iret */
-void helper_iret_protected(int shift)
+static inline void helper_ret_protected(int shift, int is_iret, int addend)
 {
     uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss;
     uint32_t new_es, new_ds, new_fs, new_gs;
@@ -834,14 +1061,16 @@
     ssp = env->segs[R_SS].base + sp;
     if (shift == 1) {
         /* 32 bits */
-        new_eflags = ldl(ssp + 8);
+        if (is_iret)
+            new_eflags = ldl(ssp + 8);
         new_cs = ldl(ssp + 4) & 0xffff;
         new_eip = ldl(ssp);
-        if (new_eflags & VM_MASK)
+        if (is_iret && (new_eflags & VM_MASK))
             goto return_to_vm86;
     } else {
         /* 16 bits */
-        new_eflags = lduw(ssp + 4);
+        if (is_iret)
+            new_eflags = lduw(ssp + 4);
         new_cs = lduw(ssp + 2);
         new_eip = lduw(ssp);
     }
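
Note: reading EFLAGS only when is_iret is set lets one routine pop both
frame shapes; an LRET frame simply stops after CS. For the 32-bit case the
layout being read is (restated as a struct for illustration, not code from
this revision):

    struct ret_frame32 {
        uint32_t eip;      /* ssp + 0               */
        uint32_t cs;       /* ssp + 4, low 16 bits  */
        uint32_t eflags;   /* ssp + 8, IRET only    */
    };
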
@@ -870,17 +1099,18 @@
     if (rpl == cpl) {
         /* return to same priledge level */
         load_seg(R_CS, new_cs, env->eip);
-        new_esp = sp + (6 << shift);
+        new_esp = sp + (4 << shift) + ((2 * is_iret) << shift) + addend;
     } else {
-        /* return to differentr priviledge level */
+        /* return to different priviledge level */
+        ssp += (4 << shift) + ((2 * is_iret) << shift) + addend;
         if (shift == 1) {
             /* 32 bits */
-            new_esp = ldl(ssp + 12);
-            new_ss = ldl(ssp + 16) & 0xffff;
+            new_esp = ldl(ssp);
+            new_ss = ldl(ssp + 4) & 0xffff;
         } else {
             /* 16 bits */
-            new_esp = lduw(ssp + 6);
-            new_ss = lduw(ssp + 8);
+            new_esp = lduw(ssp);
+            new_ss = lduw(ssp + 2);
         }
 
         if ((new_ss & 3) != rpl)
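
Note: advancing ssp by (4 << shift) + ((2 * is_iret) << shift) + addend
before loading the outer SS:ESP reproduces the old fixed offsets: with
shift == 1, is_iret == 1 and addend == 0 the skip is 8 + 4 = 12, so
ldl(ssp) and ldl(ssp + 4) read the same bytes the old code read at
ssp + 12 and ssp + 16. The addend term also covers the immediate of
lret $imm16, which the old IRET-only code could not express.
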
@@ -906,13 +1136,15 @@
         env->regs[R_ESP] = (env->regs[R_ESP] & 0xffff0000) | 
             (new_esp & 0xffff);
     env->eip = new_eip;
-    if (cpl == 0)
-        eflags_mask = FL_UPDATE_CPL0_MASK;
-    else
-        eflags_mask = FL_UPDATE_MASK32;
-    if (shift == 0)
-        eflags_mask &= 0xffff;
-    load_eflags(new_eflags, eflags_mask);
+    if (is_iret) {
+        if (cpl == 0)
+            eflags_mask = FL_UPDATE_CPL0_MASK;
+        else
+            eflags_mask = FL_UPDATE_MASK32;
+        if (shift == 0)
+            eflags_mask &= 0xffff;
+        load_eflags(new_eflags, eflags_mask);
+    }
     return;
 
  return_to_vm86:
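
Note: gating the flag restore on is_iret preserves LRET semantics, since a
far return never writes EFLAGS. The selection logic reads as a pure function
(sketch reusing the revision's FL_UPDATE_* constants; the helper name is
hypothetical):

    static uint32_t ret_eflags_mask(int is_iret, int cpl, int shift)
    {
        uint32_t mask;
        if (!is_iret)
            return 0;                          /* LRET: EFLAGS untouched */
        mask = (cpl == 0) ? FL_UPDATE_CPL0_MASK : FL_UPDATE_MASK32;
        if (shift == 0)
            mask &= 0xffff;                    /* 16-bit IRET: low half only */
        return mask;
    }
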
@@ -936,6 +1168,16 @@
     env->regs[R_ESP] = new_esp;
 }
 
+void helper_iret_protected(int shift)
+{
+    helper_ret_protected(shift, 1, 0);
+}
+
+void helper_lret_protected(int shift, int addend)
+{
+    helper_ret_protected(shift, 0, addend);
+}
+
 void helper_movl_crN_T0(int reg)
 {
     env->cr[reg] = T0;
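
Note: the public entry points are now thin wrappers over the shared body:
helper_iret_protected(shift) calls helper_ret_protected(shift, 1, 0), and
helper_lret_protected(shift, addend) passes is_iret = 0 with addend carrying
the extra bytes to discard above the return address (the immediate of
lret $imm16, 0 for a plain lret).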
