softmmu_template.h @ d720b93d

/*
 *  Software MMU support
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
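
/*
 * This header is a template: the includer is expected to define SHIFT
 * (0, 1, 2 or 3 for 1, 2, 4 or 8 byte accesses) and MMUSUFFIX before
 * including it, so that one set of load/store helpers and their slow
 * paths is generated per inclusion.  SUFFIX and USUFFIX below pick the
 * size-specific names used to build the helper and *_raw accessor
 * identifiers.
 */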
#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif

static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(unsigned long addr,
                                                        int is_user,
                                                        void *retaddr);
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(unsigned long addr,
                                                   DATA_TYPE val,
                                                   int is_user,
                                                   void *retaddr);
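
/* Read from a memory-mapped I/O page: the low bits of the TLB address
   encode an index into the io_mem_read handler table.  The table only
   goes up to 32 bit handlers, so a 64 bit access is split into two
   32 bit reads ordered according to the target endianness. */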
static inline DATA_TYPE glue(io_read, SUFFIX)(unsigned long physaddr,
                                              unsigned long tlb_addr)
{
    DATA_TYPE res;
    int index;

    index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#if SHIFT <= 2
    res = io_mem_read[index][SHIFT](physaddr);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = (uint64_t)io_mem_read[index][2](physaddr) << 32;
    res |= io_mem_read[index][2](physaddr + 4);
#else
    res = io_mem_read[index][2](physaddr);
    res |= (uint64_t)io_mem_read[index][2](physaddr + 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return res;
}
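
/* Write to a memory-mapped I/O page.  The guest virtual address and the
   host return address of the store are recorded in env->mem_write_vaddr
   and env->mem_write_pc before dispatching, so that a handler (e.g. the
   self-modifying-code path) can locate the faulting store.  As above, a
   64 bit access is split into two 32 bit writes. */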
static inline void glue(io_write, SUFFIX)(unsigned long physaddr,
                                          DATA_TYPE val,
                                          unsigned long tlb_addr,
                                          void *retaddr)
{
    int index;

    index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    env->mem_write_vaddr = tlb_addr;
    env->mem_write_pc = (unsigned long)retaddr;
#if SHIFT <= 2
    io_mem_write[index][SHIFT](physaddr, val);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write[index][2](physaddr, val >> 32);
    io_mem_write[index][2](physaddr + 4, val);
#else
    io_mem_write[index][2](physaddr, val);
    io_mem_write[index][2](physaddr + 4, val >> 32);
#endif
#endif /* SHIFT > 2 */
}
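
/* Fast-path load helper: the virtual address is hashed into the TLB
   with (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1) and the tag is
   compared.  On a hit the access is either dispatched to io_read (I/O
   page), sent to the slow path (access crossing a page boundary), or
   performed directly on host memory through the per-entry addend.  On
   a miss, tlb_fill() installs the mapping and the lookup is retried. */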
/* handle all cases except unaligned accesses which span two pages */
DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(unsigned long addr,
                                                         int is_user)
{
    DATA_TYPE res;
    int index;
    unsigned long physaddr, tlb_addr;
    void *retaddr;

    /* test if there is a match for unaligned or IO access */
    /* XXX: more of this could be done in the memory macro in a non-portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_read[is_user][index].address;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_read[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
        } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC();
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         is_user, retaddr);
        } else {
            /* unaligned access in the same page */
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)physaddr);
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(addr, 0, is_user, retaddr);
        goto redo;
    }
    return res;
}
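
/* Slow-path load: an access that crosses a page boundary is performed
   as two aligned DATA_SIZE loads around the address, which are then
   shifted and merged according to the byte offset and the target
   endianness. */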
/* handle all unaligned cases */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(unsigned long addr,
                                                        int is_user,
                                                        void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    unsigned long physaddr, tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_read[is_user][index].address;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_read[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
        } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
                                                          is_user, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
                                                          is_user, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)physaddr);
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(addr, 0, is_user, retaddr);
        goto redo;
    }
    return res;
}
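
/* Fast-path store helper: same structure as the load helper above, but
   using the write TLB (env->tlb_write) and asking tlb_fill() for a
   write mapping on a miss. */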
void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(unsigned long addr,
                                                    DATA_TYPE val,
                                                    int is_user)
{
    unsigned long physaddr, tlb_addr;
    void *retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_write[is_user][index].address;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_write[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr);
        } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
                                                   is_user, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
            glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(addr, 1, is_user, retaddr);
        goto redo;
    }
}
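
/* Slow-path store: an access that crosses a page boundary is simply
   decomposed into DATA_SIZE single-byte stores through slow_stb, with
   the bytes emitted in target endian order. */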
/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(unsigned long addr,
                                                   DATA_TYPE val,
                                                   int is_user,
                                                   void *retaddr)
{
    unsigned long physaddr, tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_write[is_user][index].address;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_write[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr);
        } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            for(i = 0; i < DATA_SIZE; i++) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          is_user, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
                                          is_user, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(addr, 1, is_user, retaddr);
        goto redo;
    }
}

#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
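
/*
 * Illustrative sketch only, not part of this file: a hypothetical
 * includer would instantiate the template once per access size roughly
 * like this, with MMUSUFFIX (here assumed to be _mmu) defined first:
 *
 *   #define MMUSUFFIX _mmu
 *
 *   #define SHIFT 0
 *   #include "softmmu_template.h"
 *
 *   #define SHIFT 1
 *   #include "softmmu_template.h"
 *
 *   #define SHIFT 2
 *   #include "softmmu_template.h"
 *
 *   #define SHIFT 3
 *   #include "softmmu_template.h"
 *
 * Each inclusion generates __ld{b,w,l,q}_mmu / __st{b,w,l,q}_mmu style
 * helpers for the corresponding DATA_SIZE; SHIFT is #undef'd above so
 * the next inclusion can redefine it.
 */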