softmmu_template.h @ 7a3f1944
/*
 *  Software MMU support
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
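
/* This header is a template: the includer defines SHIFT (0, 1, 2 or 3
   for byte, word, long and quad accesses) and includes it once per
   access size, stamping out one set of load/store helpers each time
   (e.g. SHIFT == 2 produces __ldl_mmu() and __stl_mmu()).  A sketch of
   the expected usage, not a verbatim copy of the actual includer:

       #define SHIFT 2
       #include "softmmu_template.h"
*/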

static DATA_TYPE glue(slow_ld, SUFFIX)(unsigned long addr, void *retaddr);
static void glue(slow_st, SUFFIX)(unsigned long addr, DATA_TYPE val,
                                  void *retaddr);

/* read a value from an I/O page: the I/O memory index encoded in the
   low bits of tlb_addr selects the device read callback */
static inline DATA_TYPE glue(io_read, SUFFIX)(unsigned long physaddr,
                                              unsigned long tlb_addr)
{
    DATA_TYPE res;
    int index;

    index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#if SHIFT <= 2
    res = io_mem_read[index][SHIFT](physaddr);
#else
    /* 64 bit accesses are split into two 32 bit I/O accesses, issued
       in guest byte order */
#ifdef TARGET_WORDS_BIGENDIAN
    res = (uint64_t)io_mem_read[index][2](physaddr) << 32;
    res |= io_mem_read[index][2](physaddr + 4);
#else
    res = io_mem_read[index][2](physaddr);
    res |= (uint64_t)io_mem_read[index][2](physaddr + 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return res;
}

/* write a value to an I/O page */
static inline void glue(io_write, SUFFIX)(unsigned long physaddr,
                                          DATA_TYPE val,
                                          unsigned long tlb_addr)
{
    int index;

    index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#if SHIFT <= 2
    io_mem_write[index][SHIFT](physaddr, val);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write[index][2](physaddr, val >> 32);
    io_mem_write[index][2](physaddr + 4, val);
#else
    io_mem_write[index][2](physaddr, val);
    io_mem_write[index][2](physaddr + 4, val >> 32);
#endif
#endif /* SHIFT > 2 */
}
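
/* Both helpers above dispatch through the global io_mem_read and
   io_mem_write callback tables, indexed first by the I/O memory slot
   and then by log2 of the access size (0 = byte, 1 = word, 2 = long);
   only callbacks up to 32 bits exist, which is why quad accesses are
   split in two. */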

/* handle all cases except unaligned accesses which span two pages */
DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), _mmu)(unsigned long addr)
{
    DATA_TYPE res;
    int is_user, index;
    unsigned long physaddr, tlb_addr;
    void *retaddr;

    /* test if there is match for unaligned or IO access */
    /* XXX: could be done more in the memory macro in a non-portable way */
    is_user = ((env->hflags & HF_CPL_MASK) == 3);
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_read[is_user][index].address;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_read[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
        } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = __builtin_return_address(0);
            res = glue(slow_ld, SUFFIX)(addr, retaddr);
        } else {
            /* unaligned access in the same page */
            res = glue(glue(ldu, SUFFIX), _raw)((uint8_t *)physaddr);
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = __builtin_return_address(0);
        tlb_fill(addr, 0, retaddr);
        goto redo;
    }
    return res;
}
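
/* A note on the hit test above (the same pattern appears in all four
   functions in this file): the address field of a TLB entry stores the
   page-aligned virtual address of the cached page, possibly with
   TLB_INVALID_MASK or an I/O memory index set in the low bits.  Masking
   tlb_addr with TARGET_PAGE_MASK | TLB_INVALID_MASK therefore makes the
   compare fail both on a page mismatch and on an invalidated entry, so
   a single branch covers both cases. */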

/* handle all unaligned cases */
static DATA_TYPE glue(slow_ld, SUFFIX)(unsigned long addr, void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int is_user, index, shift;
    unsigned long physaddr, tlb_addr, addr1, addr2;

    is_user = ((env->hflags & HF_CPL_MASK) == 3);
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_read[is_user][index].address;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_read[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
        } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            /* load the two aligned words around addr and combine them;
               the recursive calls are aligned, so they cannot recurse
               further down this path */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(slow_ld, SUFFIX)(addr1, retaddr);
            res2 = glue(slow_ld, SUFFIX)(addr2, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
        } else {
            /* unaligned/aligned access in the same page */
            res = glue(glue(ldu, SUFFIX), _raw)((uint8_t *)physaddr);
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(addr, 0, retaddr);
        goto redo;
    }
    return res;
}
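
/* Worked example of the combination above (little endian, SHIFT == 2,
   so DATA_SIZE == 4): for addr == 0x1001, addr1 == 0x1000 and
   addr2 == 0x1004, so shift == 8.  res1 >> 8 contributes the bytes at
   0x1001..0x1003 in the low positions and res2 << 24 contributes the
   byte at 0x1004 in the top position, which is exactly the 32 bit
   value a byte-wise little endian load at 0x1001 would produce. */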

/* handle all cases except unaligned accesses which span two pages */
void REGPARM(2) glue(glue(__st, SUFFIX), _mmu)(unsigned long addr, DATA_TYPE val)
{
    unsigned long physaddr, tlb_addr;
    void *retaddr;
    int is_user, index;

    is_user = ((env->hflags & HF_CPL_MASK) == 3);
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_write[is_user][index].address;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* XXX: the addend is taken from the read entry, which assumes
           it is valid whenever the matching write entry is */
        physaddr = addr + env->tlb_read[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            glue(io_write, SUFFIX)(physaddr, val, tlb_addr);
        } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = __builtin_return_address(0);
            glue(slow_st, SUFFIX)(addr, val, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
            glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = __builtin_return_address(0);
        tlb_fill(addr, 1, retaddr);
        goto redo;
    }
}
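
/* In every function above, retaddr is the host return address inside
   the generated code, obtained with __builtin_return_address(0).  It
   is handed down to tlb_fill() so that, if the access faults, the
   emulator can locate the translated block that issued the access and
   rebuild the guest CPU state before raising the exception. */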

/* handles all unaligned cases */
static void glue(slow_st, SUFFIX)(unsigned long addr, DATA_TYPE val,
                                  void *retaddr)
{
    unsigned long physaddr, tlb_addr;
    int is_user, index, i;

    is_user = ((env->hflags & HF_CPL_MASK) == 3);
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_write[is_user][index].address;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_read[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            glue(io_write, SUFFIX)(physaddr, val, tlb_addr);
        } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            for (i = 0; i < DATA_SIZE; i++) {
#ifdef TARGET_WORDS_BIGENDIAN
                /* big endian: most significant byte first */
                slow_stb(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)), retaddr);
#else
                /* little endian: least significant byte first */
                slow_stb(addr + i, val >> (i * 8), retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(addr, 1, retaddr);
        goto redo;
    }
}
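
/* The byte-wise fallback above calls slow_stb(), the SHIFT == 0
   instantiation of this template.  A single-byte access can never span
   a page boundary, so the recursion bottoms out there, with each byte
   taking the normal TLB (or I/O) path on its own page. */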

/* reset the template parameters so this file can be included again
   with a different SHIFT */
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef DATA_SIZE