Revision 3ab493de

b/target-i386/exec.h
@@ -170,6 +170,8 @@
 void helper_wrmsr(void);
 void helper_lsl(void);
 void helper_lar(void);
+void helper_verr(void);
+void helper_verw(void);
 
 void check_iob_T0(void);
 void check_iow_T0(void);
b/target-i386/helper.c
@@ -1037,13 +1037,15 @@
     env->tr.selector = selector;
 }
 
-/* only works if protected mode and not VM86. Calling load_seg with
-   seg_reg == R_CS is discouraged */
-/* XXX: add ring level checks */
+/* only works if protected mode and not VM86. seg_reg must be != R_CS */
 void load_seg(int seg_reg, int selector, unsigned int cur_eip)
 {
     uint32_t e1, e2;
-
+    int cpl, dpl, rpl;
+    SegmentCache *dt;
+    int index;
+    uint8_t *ptr;
+
     if ((selector & 0xfffc) == 0) {
         /* null selector case */
         if (seg_reg == R_SS) {
@@ -1053,26 +1055,51 @@
             cpu_x86_load_seg_cache(env, seg_reg, selector, NULL, 0, 0);
         }
     } else {
-        if (load_segment(&e1, &e2, selector) != 0) {
+
+        if (selector & 0x4)
+            dt = &env->ldt;
+        else
+            dt = &env->gdt;
+        index = selector & ~7;
+        if ((index + 7) > dt->limit) {
             EIP = cur_eip;
             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
         }
-        if (!(e2 & DESC_S_MASK) ||
-            (e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
+        ptr = dt->base + index;
+        e1 = ldl_kernel(ptr);
+        e2 = ldl_kernel(ptr + 4);
+
+        if (!(e2 & DESC_S_MASK)) {
             EIP = cur_eip;
             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
         }
-
+        rpl = selector & 3;
+        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+        cpl = env->hflags & HF_CPL_MASK;
         if (seg_reg == R_SS) {
+            /* must be writable segment */
             if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                 EIP = cur_eip;
                 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
             }
+            if (rpl != cpl || dpl != cpl) {
+                EIP = cur_eip;
+                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+            }
         } else {
+            /* must be readable segment */
             if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                 EIP = cur_eip;
                 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
             }
+
+            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
+                /* if not conforming code, test rights */
+                if (dpl < cpl || dpl < rpl) {
+                    EIP = cur_eip;
+                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+                }
+            }
         }
 
         if (!(e2 & DESC_P_MASK)) {
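The segment checks added in the hunk above follow the standard protected-mode rule for data and non-conforming code segments: the descriptor's DPL must be numerically at least both the current privilege level and the selector's RPL, and a stack segment additionally requires RPL == CPL and DPL == CPL. A minimal sketch of that rule as a standalone predicate (hypothetical helper, not part of this revision):

    /* Hypothetical sketch: access is allowed only when max(CPL, RPL) <= DPL,
       the condition that the new "dpl < cpl || dpl < rpl" tests above reject. */
    static inline int data_seg_access_ok(int cpl, int rpl, int dpl)
    {
        return dpl >= cpl && dpl >= rpl;
    }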
@@ -1082,6 +1109,13 @@
             else
                 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
         }
+
+        /* set the access bit if not already set */
+        if (!(e2 & DESC_A_MASK)) {
+            e2 |= DESC_A_MASK;
+            stl_kernel(ptr + 4, e2);
+        }
+
         cpu_x86_load_seg_cache(env, seg_reg, selector,
                        get_seg_base(e1, e2),
                        get_seg_limit(e1, e2),
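For reference, the e2 word tested and rewritten above is the high 32 bits of the segment descriptor; a sketch of its relevant bits, mirroring the DESC_* masks this code uses (the names below are hypothetical stand-ins, not definitions from this revision):

    #define SEG_DESC_A        (1 << 8)   /* accessed bit, set by the hunk above */
    #define SEG_DESC_RW       (1 << 9)   /* readable (code) / writable (data) */
    #define SEG_DESC_CE       (1 << 10)  /* conforming (code) / expand-down (data) */
    #define SEG_DESC_CODE     (1 << 11)  /* 1 = code segment, 0 = data segment */
    #define SEG_DESC_S        (1 << 12)  /* 1 = code/data, 0 = system descriptor */
    #define SEG_DESC_DPL(e2)  (((e2) >> 13) & 3)
    #define SEG_DESC_P        (1 << 15)  /* present bit */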
@@ -1696,14 +1730,38 @@
 {
     unsigned int selector, limit;
     uint32_t e1, e2;
+    int rpl, dpl, cpl, type;
 
     CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
     selector = T0 & 0xffff;
     if (load_segment(&e1, &e2, selector) != 0)
         return;
-    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
-    if (e2 & (1 << 23))
-        limit = (limit << 12) | 0xfff;
+    rpl = selector & 3;
+    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+    cpl = env->hflags & HF_CPL_MASK;
+    if (e2 & DESC_S_MASK) {
+        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
+            /* conforming */
+        } else {
+            if (dpl < cpl || dpl < rpl)
+                return;
+        }
+    } else {
+        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
+        switch(type) {
+        case 1:
+        case 2:
+        case 3:
+        case 9:
+        case 11:
+            break;
+        default:
+            return;
+        }
+        if (dpl < cpl || dpl < rpl)
+            return;
+    }
+    limit = get_seg_limit(e1, e2);
     T1 = limit;
     CC_SRC |= CC_Z;
 }
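helper_lsl() no longer expands the limit inline; the three removed lines above are exactly what get_seg_limit() computes. A minimal sketch of that computation, assuming the usual descriptor encoding (the helper name below is a hypothetical stand-in, its body is the removed code):

    static inline unsigned int seg_limit_of(uint32_t e1, uint32_t e2)
    {
        unsigned int limit = (e1 & 0xffff) | (e2 & 0x000f0000);
        if (e2 & (1 << 23))                 /* granularity (G) bit */
            limit = (limit << 12) | 0xfff;  /* limit counted in 4 KiB units */
        return limit;
    }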
@@ -1712,15 +1770,105 @@
 {
     unsigned int selector;
     uint32_t e1, e2;
+    int rpl, dpl, cpl, type;
 
     CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
     selector = T0 & 0xffff;
+    if ((selector & 0xfffc) == 0)
+        return;
     if (load_segment(&e1, &e2, selector) != 0)
         return;
+    rpl = selector & 3;
+    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+    cpl = env->hflags & HF_CPL_MASK;
+    if (e2 & DESC_S_MASK) {
+        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
+            /* conforming */
+        } else {
+            if (dpl < cpl || dpl < rpl)
+                return;
+        }
+    } else {
+        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
+        switch(type) {
+        case 1:
+        case 2:
+        case 3:
+        case 4:
+        case 5:
+        case 9:
+        case 11:
+        case 12:
+            break;
+        default:
+            return;
+        }
+        if (dpl < cpl || dpl < rpl)
+            return;
+    }
     T1 = e2 & 0x00f0ff00;
     CC_SRC |= CC_Z;
 }
 
+void helper_verr(void)
+{
+    unsigned int selector;
+    uint32_t e1, e2;
+    int rpl, dpl, cpl;
+
+    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
+    selector = T0 & 0xffff;
+    if ((selector & 0xfffc) == 0)
+        return;
+    if (load_segment(&e1, &e2, selector) != 0)
+        return;
+    if (!(e2 & DESC_S_MASK))
+        return;
+    rpl = selector & 3;
+    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+    cpl = env->hflags & HF_CPL_MASK;
+    if (e2 & DESC_CS_MASK) {
+        if (!(e2 & DESC_R_MASK))
+            return;
+        if (!(e2 & DESC_C_MASK)) {
+            if (dpl < cpl || dpl < rpl)
+                return;
+        }
+    } else {
+        if (dpl < cpl || dpl < rpl)
+            return;
+    }
+    /* ok */
+}
+
+void helper_verw(void)
+{
+    unsigned int selector;
+    uint32_t e1, e2;
+    int rpl, dpl, cpl;
+
+    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
+    selector = T0 & 0xffff;
+    if ((selector & 0xfffc) == 0)
+        return;
+    if (load_segment(&e1, &e2, selector) != 0)
+        return;
+    if (!(e2 & DESC_S_MASK))
+        return;
+    rpl = selector & 3;
+    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+    cpl = env->hflags & HF_CPL_MASK;
+    if (e2 & DESC_CS_MASK) {
+        return;
+    } else {
+        if (dpl < cpl || dpl < rpl)
+            return;
+        if (!(e2 & DESC_W_MASK))
+            return;
+    }
+    /* ok */
+}
+
 /* FPU helpers */
 
 void helper_fldt_ST0_A0(void)
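The new helper_verr()/helper_verw() perform the VERR/VERW descriptor checks, returning early as soon as the selector fails a test. A compressed sketch of the readability rule helper_verr() implements, written as a hypothetical standalone predicate (not part of this revision):

    /* Hypothetical sketch, equivalent to helper_verr()'s early returns. */
    static int verr_ok(uint32_t e2, int cpl, int rpl)
    {
        int dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (!(e2 & DESC_S_MASK))
            return 0;                 /* system descriptors never pass */
        if (e2 & DESC_CS_MASK) {
            if (!(e2 & DESC_R_MASK))
                return 0;             /* execute-only code is not readable */
            if (e2 & DESC_C_MASK)
                return 1;             /* conforming code skips the DPL test */
        }
        return dpl >= cpl && dpl >= rpl;
    }

helper_verw() differs only in that any code segment fails (code segments are never writable) and the data segment must have DESC_W_MASK set.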
b/target-i386/op.c
@@ -936,6 +936,35 @@
     helper_lar();
 }
 
+void OPPROTO op_verr(void)
+{
+    helper_verr();
+}
+
+void OPPROTO op_verw(void)
+{
+    helper_verw();
+}
+
+void OPPROTO op_arpl(void)
+{
+    if ((T0 & 3) < (T1 & 3)) {
+        /* XXX: emulate bug or 0xff3f0000 oring as in bochs ? */
+        T0 = (T0 & ~3) | (T1 & 3);
+        T1 = CC_Z;
+   } else {
+        T1 = 0;
+    }
+    FORCE_RET();
+}
+
+void OPPROTO op_arpl_update(void)
+{
+    int eflags;
+    eflags = cc_table[CC_OP].compute_all();
+    CC_SRC = (eflags & ~CC_Z) | T1;
+}
+
 /* T0: segment, T1:eip */
 void OPPROTO op_ljmp_protected_T0_T1(void)
 {
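op_arpl() leaves the possibly adjusted selector in T0 and the Z outcome in T1, and op_arpl_update() then folds that outcome into CC_SRC. A minimal sketch of the guest-visible ARPL semantics the two ops implement together (hypothetical helper, not part of this revision):

    /* ARPL: if the destination selector's RPL is below the source's RPL,
       raise it to match and report ZF = 1; otherwise leave it and report ZF = 0. */
    static uint16_t arpl(uint16_t dst, uint16_t src, int *zf)
    {
        if ((dst & 3) < (src & 3)) {
            *zf = 1;
            return (dst & ~3) | (src & 3);
        }
        *zf = 0;
        return dst;
    }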
