softmmu_template.h @ 61382a50

/*
 *  Software MMU support
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
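
/* Usage sketch (illustrative only; the exact MMUSUFFIX value and the
 * generated names below are assumptions, not taken from this file): the
 * including source instantiates this template once per access size by
 * defining SHIFT before each inclusion, with MMUSUFFIX, GETPC() and
 * REGPARM() provided by the includer as well:
 *
 *     #define MMUSUFFIX _mmu
 *
 *     #define SHIFT 0
 *     #include "softmmu_template.h"    // __ldb_mmu / __stb_mmu, ...
 *     #define SHIFT 1
 *     #include "softmmu_template.h"    // __ldw_mmu / __stw_mmu, ...
 *     #define SHIFT 2
 *     #include "softmmu_template.h"    // __ldl_mmu / __stl_mmu, ...
 *     #define SHIFT 3
 *     #include "softmmu_template.h"    // __ldq_mmu / __stq_mmu, ...
 */
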
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(unsigned long addr,
                                                        int is_user,
                                                        void *retaddr);
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(unsigned long addr,
                                                   DATA_TYPE val,
                                                   int is_user,
                                                   void *retaddr);

static inline DATA_TYPE glue(io_read, SUFFIX)(unsigned long physaddr,
                                              unsigned long tlb_addr)
{
    DATA_TYPE res;
    int index;

    index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#if SHIFT <= 2
    res = io_mem_read[index][SHIFT](physaddr);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = (uint64_t)io_mem_read[index][2](physaddr) << 32;
    res |= io_mem_read[index][2](physaddr + 4);
#else
    res = io_mem_read[index][2](physaddr);
    res |= (uint64_t)io_mem_read[index][2](physaddr + 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return res;
}

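/* The io_mem_read[]/io_mem_write[] handlers are indexed by access-size
   shift and are only invoked here for shifts 0..2 (8/16/32 bit), so in the
   SHIFT > 2 case a 64-bit MMIO access is split into two 32-bit accesses:
   high word first on big-endian targets, low word first otherwise. */
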
static inline void glue(io_write, SUFFIX)(unsigned long physaddr,
                                          DATA_TYPE val,
                                          unsigned long tlb_addr)
{
    int index;

    index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#if SHIFT <= 2
    io_mem_write[index][SHIFT](physaddr, val);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write[index][2](physaddr, val >> 32);
    io_mem_write[index][2](physaddr + 4, val);
#else
    io_mem_write[index][2](physaddr, val);
    io_mem_write[index][2](physaddr + 4, val >> 32);
#endif
#endif /* SHIFT > 2 */
}

/* handle all cases except unaligned accesses which span two pages */
DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(unsigned long addr,
                                                         int is_user)
{
    DATA_TYPE res;
    int index;
    unsigned long physaddr, tlb_addr;
    void *retaddr;

    /* test if there is a match for an unaligned or IO access */
    /* XXX: could be done more efficiently in the memory macro in a non-portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_read[is_user][index].address;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_read[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
        } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or is an IO access) */
        do_unaligned_access:
            retaddr = GETPC();
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         is_user, retaddr);
        } else {
            /* unaligned access in the same page */
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)physaddr);
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
        tlb_fill(addr, 0, is_user, retaddr);
        goto redo;
    }
    return res;
}

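/* Worked example of the fast path above (illustrative values: assume 4 KB
   pages, i.e. TARGET_PAGE_BITS == 12, consistent with the hardcoded 0xfff
   page offset mask, and CPU_TLB_SIZE == 256).  For a 4-byte load at
   addr == 0x00012345:

       index    = (0x12345 >> 12) & 255 = 0x12
       tlb_addr = env->tlb_read[is_user][0x12].address

   The entry hits only if the page tag matches and the invalid bit is clear:
   (0x12345 & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK)).
   Low bits in tlb_addr distinguish the cases: a set invalid bit makes the
   comparison fail (refill through tlb_fill()), while other flag bits keep
   the hit but route the access through io_read.  Here (0x345 + 4 - 1) is
   less than TARGET_PAGE_SIZE, so the access stays within one page and the
   direct ld*_raw load is used. */
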
/* handle all unaligned cases */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(unsigned long addr,
                                                        int is_user,
                                                        void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    unsigned long physaddr, tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_read[is_user][index].address;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_read[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
        } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
                                                          is_user, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
                                                          is_user, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
        } else {
            /* unaligned/aligned access in the same page */
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)physaddr);
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(addr, 0, is_user, retaddr);
        goto redo;
    }
    return res;
}

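/* Recombination example for the cross-page path above (little-endian,
   DATA_SIZE == 4): for a load at the last-but-one byte of a page, addr & 3
   is 2, so the two aligned words at addr1 = addr & ~3 and addr2 = addr1 + 4
   are fetched, shift = 2 * 8 = 16, and

       res = (res1 >> 16) | (res2 << 16)

   i.e. the two bytes still on the first page land in the low half of the
   result and the two bytes from the second page in the high half.  On
   big-endian targets the two shifts are mirrored. */
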
void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(unsigned long addr,
                                                    DATA_TYPE val,
                                                    int is_user)
{
    unsigned long physaddr, tlb_addr;
    void *retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_write[is_user][index].address;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_read[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            glue(io_write, SUFFIX)(physaddr, val, tlb_addr);
        } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
                                                   is_user, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
            glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
        tlb_fill(addr, 1, is_user, retaddr);
        goto redo;
    }
}

/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(unsigned long addr,
                                                   DATA_TYPE val,
                                                   int is_user,
                                                   void *retaddr)
{
    unsigned long physaddr, tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_write[is_user][index].address;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_read[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            glue(io_write, SUFFIX)(physaddr, val, tlb_addr);
        } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            for(i = 0; i < DATA_SIZE; i++) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          is_user, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
                                          is_user, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(addr, 1, is_user, retaddr);
        goto redo;
    }
}

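/* Byte ordering in the unaligned store path above, for DATA_SIZE == 4: the
   value is emitted one byte at a time through slow_stb so that each byte
   lands on whichever page it belongs to.  Byte i receives val >> (i * 8) on
   little-endian targets and val >> ((3 - i) * 8) on big-endian targets,
   which reproduces the target byte order that the aligned st*_raw store in
   the fast path would have used (assuming st*_raw stores in target order;
   that is defined outside this file). */
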
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE