Revision e37e6ee6

b/exec.c
@@ -1734,12 +1734,18 @@
         env->tlb_table[2][i].addr_read = -1;
         env->tlb_table[2][i].addr_write = -1;
         env->tlb_table[2][i].addr_code = -1;
-#if (NB_MMU_MODES == 4)
+#endif
+#if (NB_MMU_MODES >= 4)
         env->tlb_table[3][i].addr_read = -1;
         env->tlb_table[3][i].addr_write = -1;
         env->tlb_table[3][i].addr_code = -1;
 #endif
+#if (NB_MMU_MODES >= 5)
+        env->tlb_table[4][i].addr_read = -1;
+        env->tlb_table[4][i].addr_write = -1;
+        env->tlb_table[4][i].addr_code = -1;
 #endif
+
     }
 
     memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
@@ -1783,9 +1789,12 @@
     tlb_flush_entry(&env->tlb_table[1][i], addr);
 #if (NB_MMU_MODES >= 3)
     tlb_flush_entry(&env->tlb_table[2][i], addr);
-#if (NB_MMU_MODES == 4)
+#endif
+#if (NB_MMU_MODES >= 4)
     tlb_flush_entry(&env->tlb_table[3][i], addr);
 #endif
+#if (NB_MMU_MODES >= 5)
+    tlb_flush_entry(&env->tlb_table[4][i], addr);
 #endif
 
     tlb_flush_jmp_cache(env, addr);
@@ -1869,10 +1878,14 @@
 #if (NB_MMU_MODES >= 3)
         for(i = 0; i < CPU_TLB_SIZE; i++)
             tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
-#if (NB_MMU_MODES == 4)
+#endif
+#if (NB_MMU_MODES >= 4)
         for(i = 0; i < CPU_TLB_SIZE; i++)
             tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
 #endif
+#if (NB_MMU_MODES >= 5)
+        for(i = 0; i < CPU_TLB_SIZE; i++)
+            tlb_reset_dirty_range(&env->tlb_table[4][i], start1, length);
 #endif
     }
 }
@@ -1918,10 +1931,14 @@
 #if (NB_MMU_MODES >= 3)
     for(i = 0; i < CPU_TLB_SIZE; i++)
         tlb_update_dirty(&env->tlb_table[2][i]);
-#if (NB_MMU_MODES == 4)
+#endif
+#if (NB_MMU_MODES >= 4)
     for(i = 0; i < CPU_TLB_SIZE; i++)
         tlb_update_dirty(&env->tlb_table[3][i]);
 #endif
+#if (NB_MMU_MODES >= 5)
+    for(i = 0; i < CPU_TLB_SIZE; i++)
+        tlb_update_dirty(&env->tlb_table[4][i]);
 #endif
 }
 
@@ -1943,9 +1960,12 @@
     tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
 #if (NB_MMU_MODES >= 3)
     tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
-#if (NB_MMU_MODES == 4)
+#endif
+#if (NB_MMU_MODES >= 4)
     tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
 #endif
+#if (NB_MMU_MODES >= 5)
+    tlb_set_dirty1(&env->tlb_table[4][i], vaddr);
 #endif
 }
 
b/softmmu_exec.h
@@ -60,6 +60,7 @@
 #include "softmmu_header.h"
 #undef ACCESS_TYPE
 #undef MEMSUFFIX
+#endif /* (NB_MMU_MODES >= 3) */
 
 #if (NB_MMU_MODES >= 4)
 
@@ -78,12 +79,30 @@
 #include "softmmu_header.h"
 #undef ACCESS_TYPE
 #undef MEMSUFFIX
+#endif /* (NB_MMU_MODES >= 4) */
 
-#if (NB_MMU_MODES > 4)
-#error "NB_MMU_MODES > 4 is not supported for now"
-#endif /* (NB_MMU_MODES > 4) */
-#endif /* (NB_MMU_MODES == 4) */
-#endif /* (NB_MMU_MODES >= 3) */
+#if (NB_MMU_MODES >= 5)
+
+#define ACCESS_TYPE 4
+#define MEMSUFFIX MMU_MODE4_SUFFIX
+#define DATA_SIZE 1
+#include "softmmu_header.h"
+
+#define DATA_SIZE 2
+#include "softmmu_header.h"
+
+#define DATA_SIZE 4
+#include "softmmu_header.h"
+
+#define DATA_SIZE 8
+#include "softmmu_header.h"
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
+#endif /* (NB_MMU_MODES >= 5) */
+
+#if (NB_MMU_MODES > 5)
+#error "NB_MMU_MODES > 5 is not supported for now"
+#endif /* (NB_MMU_MODES > 5) */
 
 /* these access are slower, they must be as rare as possible */
 #define ACCESS_TYPE (NB_MMU_MODES)
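
Note on the softmmu_exec.h hunk above: the file generates one set of memory accessors per MMU mode by re-including softmmu_header.h several times with ACCESS_TYPE, MEMSUFFIX and DATA_SIZE predefined, and the new (NB_MMU_MODES >= 5) block repeats that recipe for a fifth mode (ACCESS_TYPE 4, MMU_MODE4_SUFFIX). The following stand-alone sketch is not QEMU code; the suffix names and helper names are hypothetical, and it only illustrates the token-pasting pattern the real header relies on:

/* Minimal sketch of the per-mode instantiation pattern; _kernel and
 * _hypv are made-up stand-ins for the per-target MMU_MODEn_SUFFIX
 * macros, and DEFINE_LDUB stands in for softmmu_header.h. */
#include <stdint.h>
#include <stdio.h>

#define glue_(a, b) a##b
#define glue(a, b)  glue_(a, b)

#define MODE0_SUFFIX _kernel
#define MODE4_SUFFIX _hypv          /* the hypothetical fifth mode */

/* One accessor is generated per suffix; the real helpers would go
 * through the per-mode TLB table instead of dereferencing directly. */
#define DEFINE_LDUB(suffix)                              \
    static uint8_t glue(ldub, suffix)(const uint8_t *p)  \
    {                                                    \
        return *p;                                       \
    }

DEFINE_LDUB(MODE0_SUFFIX)   /* expands to ldub_kernel() */
DEFINE_LDUB(MODE4_SUFFIX)   /* expands to ldub_hypv()   */

int main(void)
{
    uint8_t byte = 0x42;
    printf("%d %d\n", ldub_kernel(&byte), ldub_hypv(&byte));
    return 0;
}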
