/*
 *  Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
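
/*
 * Usage sketch (illustrative, based on the note above that this file is
 * included from the target op helpers and exec.c): the including file is
 * expected to define SHIFT, MMUSUFFIX, GETPC and env before each inclusion,
 * and includes this template once per access size, roughly as follows:
 *
 *   #define MMUSUFFIX _mmu
 *   #define SHIFT 0
 *   #include "softmmu_template.h"   // byte helpers
 *   #define SHIFT 1
 *   #include "softmmu_template.h"   // 16-bit helpers
 *   #define SHIFT 2
 *   #include "softmmu_template.h"   // 32-bit helpers
 *   #define SHIFT 3
 *   #include "softmmu_template.h"   // 64-bit helpers
 *
 * Code-load helpers are generated the same way with SOFTMMU_CODE_ACCESS
 * defined and a different MMUSUFFIX. The exact names and definitions live
 * in the including file.
 */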
#include "qemu-timer.h"
#include "memory.h"

#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif
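
/*
 * Note: READ_ACCESS_TYPE is the access-type code handed to tlb_fill() and
 * do_unaligned_access() below: 0 for a data load, 2 for a code fetch; the
 * store helpers further down pass 1 for data writes.
 */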

static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr);
static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
                                              target_ulong addr,
                                              void *retaddr)
{
    DATA_TYPE res;
    int index;
    index = physaddr & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = (unsigned long)retaddr;
    if (index != io_mem_ram.ram_addr && index != io_mem_rom.ram_addr
        && index != io_mem_unassigned.ram_addr
        && index != io_mem_notdirty.ram_addr
        && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
#if SHIFT <= 2
    res = io_mem_read(index, physaddr, 1 << SHIFT);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = io_mem_read(index, physaddr, 4) << 32;
    res |= io_mem_read(index, physaddr + 4, 4);
#else
    res = io_mem_read(index, physaddr, 4);
    res |= io_mem_read(index, physaddr + 4, 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return res;
}
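
/*
 * Note: when SHIFT > 2 (64-bit accesses), io_read and io_write transfer the
 * value as two 32-bit MMIO operations, issued in guest byte order as
 * selected by TARGET_WORDS_BIGENDIAN.
 */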

/* handle all cases except unaligned accesses which span two pages */
DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                      int mmu_idx)
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t ioaddr;
    unsigned long addend;
    void *retaddr;

    /* test if there is a match for unaligned or IO access */
    /* XXX: this could be done more efficiently in the memory macros, in a
       non-portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            ioaddr = section_to_ioaddr(env->iotlb[mmu_idx][index]);
            res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}
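
/*
 * Note on the fast path above: the page-aligned virtual address is compared
 * against the TLB entry's tag with TLB_INVALID_MASK folded into the mask, so
 * an invalidated entry can never match. A matching entry with any other low
 * bits set (tlb_addr & ~TARGET_PAGE_MASK) is not plain RAM and is routed to
 * the I/O path; otherwise the host address is simply addr + addend.
 */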

/* handle all unaligned cases */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = section_to_ioaddr(env->iotlb[mmu_idx][index]);
            res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
                                                          mmu_idx, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}
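
/*
 * Worked example for the two-page case above (illustrative; assumes a
 * little-endian target and 4 KiB pages): a 4-byte load at 0x1ffe gives
 * addr1 = 0x1ffc, addr2 = 0x2000 and shift = 16.
 * res = (res1 >> 16) | (res2 << 16) then combines the last two bytes of the
 * first page with the first two bytes of the second page, i.e. exactly the
 * four bytes at 0x1ffe..0x2001.
 */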

#ifndef SOFTMMU_CODE_ACCESS

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr);

static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          void *retaddr)
{
    int index;
    index = physaddr & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (index != io_mem_ram.ram_addr && index != io_mem_rom.ram_addr
        && index != io_mem_unassigned.ram_addr
        && index != io_mem_notdirty.ram_addr
        && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = (unsigned long)retaddr;
#if SHIFT <= 2
    io_mem_write(index, physaddr, val, 1 << SHIFT);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write(index, physaddr, (val >> 32), 4);
    io_mem_write(index, physaddr + 4, (uint32_t)val, 4);
#else
    io_mem_write(index, physaddr, (uint32_t)val, 4);
    io_mem_write(index, physaddr + 4, val >> 32, 4);
#endif
#endif /* SHIFT > 2 */
}

void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                 DATA_TYPE val,
                                                 int mmu_idx)
{
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr;
    void *retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            ioaddr = section_to_ioaddr(env->iotlb[mmu_idx][index]);
            glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
                                                   mmu_idx, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}
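
/*
 * The store fast path mirrors the load fast path: the TLB is probed via
 * addr_write, MMIO goes through io_write, accesses spanning two pages fall
 * back to the slow_st helper, and RAM stores go through st*_raw at
 * addr + addend.
 */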

/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr)
{
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = section_to_ioaddr(env->iotlb[mmu_idx][index]);
            glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache.  */
            for (i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}
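
/*
 * The fallback above simply splits the store into single-byte slow_stb
 * calls, extracting each byte of val according to the guest byte order, so
 * a store crossing a page boundary touches both pages one byte at a time.
 */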

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ