root / target-ppc / mem_helper.c @ 1de7afc9

/*
 *  PowerPC memory access emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "qemu/host-utils.h"
#include "helper.h"

#include "helper_regs.h"

#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

//#define DEBUG_OP

/*****************************************************************************/
/* Memory load and stores */

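/* Advance an effective address by arg, truncating the result to 32 bits
 * when the CPU is not in 64-bit mode so that address arithmetic wraps as
 * the architecture requires.
 */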
static inline target_ulong addr_add(CPUPPCState *env, target_ulong addr,
                                    target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_is_64bit(env, env->msr)) {
        return (uint32_t)(addr + arg);
    } else
#endif
    {
        return addr + arg;
    }
}

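/* lmw: load consecutive words from memory into GPRs reg..31,
 * byte-swapping in little-endian mode.
 */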
void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le) {
            env->gpr[reg] = bswap32(cpu_ldl_data(env, addr));
        } else {
            env->gpr[reg] = cpu_ldl_data(env, addr);
        }
        addr = addr_add(env, addr, 4);
    }
}

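/* stmw: store GPRs reg..31 to consecutive words in memory,
 * byte-swapping in little-endian mode.
 */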
void helper_stmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le) {
            cpu_stl_data(env, addr, bswap32((uint32_t)env->gpr[reg]));
        } else {
            cpu_stl_data(env, addr, (uint32_t)env->gpr[reg]);
        }
        addr = addr_add(env, addr, 4);
    }
}

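/* lswi/lswx backend: load nb bytes into successive registers starting at
 * reg, wrapping from r31 to r0; the final partial word is left-justified
 * and zero-padded.
 */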
void helper_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        env->gpr[reg] = cpu_ldl_data(env, addr);
        reg = (reg + 1) % 32;
        addr = addr_add(env, addr, 4);
    }
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            env->gpr[reg] |= cpu_ldub_data(env, addr) << sh;
            addr = addr_add(env, addr, 1);
        }
    }
}

/* The PPC32 specification says we must generate an exception if rA is in
 * the range of registers to be loaded.
 * On the other hand, IBM says this is valid, but rA won't be loaded.
 * For now, I'll follow the spec...
 */
void helper_lswx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                 uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
                     (reg < rb && (reg + xer_bc) > rb))) {
            helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_INVAL |
                                       POWERPC_EXCP_INVAL_LSWX);
        } else {
            helper_lsw(env, addr, xer_bc, reg);
        }
    }
}

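/* stswi/stswx backend: store nb bytes taken from successive registers
 * starting at reg, wrapping from r31 to r0; trailing bytes come from the
 * most-significant end of the last register.
 */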
void helper_stsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                 uint32_t reg)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        cpu_stl_data(env, addr, env->gpr[reg]);
        reg = (reg + 1) % 32;
        addr = addr_add(env, addr, 4);
    }
    if (unlikely(nb > 0)) {
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            cpu_stb_data(env, addr, (env->gpr[reg] >> sh) & 0xFF);
            addr = addr_add(env, addr, 1);
        }
    }
}

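/* Zero an aligned data cache block of the given size and drop any
 * reservation held on it.
 */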
static void do_dcbz(CPUPPCState *env, target_ulong addr, int dcache_line_size)
{
    int i;

    addr &= ~(dcache_line_size - 1);
    for (i = 0; i < dcache_line_size; i += 4) {
        cpu_stl_data(env, addr + i, 0);
    }
    if (env->reserve_addr == addr) {
        env->reserve_addr = (target_ulong)-1ULL;
    }
}

void helper_dcbz(CPUPPCState *env, target_ulong addr)
{
    do_dcbz(env, addr, env->dcache_line_size);
}

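/* PPC970 variant of dcbz: depending on the HID5 setting, clear a 32-byte
 * block instead of the full cache line.
 */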
void helper_dcbz_970(CPUPPCState *env, target_ulong addr)
{
    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) {
        do_dcbz(env, addr, 32);
    } else {
        do_dcbz(env, addr, env->dcache_line_size);
    }
}

void helper_icbi(CPUPPCState *env, target_ulong addr)
{
    addr &= ~(env->dcache_line_size - 1);
    /* Invalidate one cache line:
     * The PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU.  To be sure it will be so, do the load
     * "by hand".
     */
    cpu_ldl_data(env, addr);
}

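/* lscbx (POWER architecture): load up to xer_bc bytes into successive
 * registers, wrapping from r31 to r0, stopping early when a loaded byte
 * matches the XER compare byte.  The returned count is used by the caller
 * to update XER.
 */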
/* XXX: to be tested */
target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                          uint32_t ra, uint32_t rb)
{
    int i, c, d;

    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = cpu_ldub_data(env, addr);
        addr = addr_add(env, addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp)) {
            break;
        }
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}

/*****************************************************************************/
/* Altivec extension helpers */
#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

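/* LVE expands to the lvebx/lvehx/lvewx helpers: load a single element into
 * the vector register slot selected by the effective address, byte-swapping
 * in little-endian mode.
 */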
#define LVE(name, access, swap, element)                        \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,          \
                       target_ulong addr)                       \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX*(n_elems - 1);                      \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
                                                                \
        if (msr_le) {                                           \
            r->element[LO_IDX ? index : (adjust - index)] =     \
                swap(access(env, addr));                        \
        } else {                                                \
            r->element[LO_IDX ? index : (adjust - index)] =     \
                access(env, addr);                              \
        }                                                       \
    }
#define I(x) (x)
LVE(lvebx, cpu_ldub_data, I, u8)
LVE(lvehx, cpu_lduw_data, bswap16, u16)
LVE(lvewx, cpu_ldl_data, bswap32, u32)
#undef I
#undef LVE

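/* STVE expands to the stvebx/stvehx/stvewx helpers: store the vector element
 * selected by the effective address, byte-swapping in little-endian mode.
 */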
#define STVE(name, access, swap, element)                               \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,                  \
                       target_ulong addr)                               \
    {                                                                   \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
        int adjust = HI_IDX * (n_elems - 1);                            \
        int sh = sizeof(r->element[0]) >> 1;                            \
        int index = (addr & 0xf) >> sh;                                 \
                                                                        \
        if (msr_le) {                                                   \
            access(env, addr, swap(r->element[LO_IDX ? index :          \
                                              (adjust - index)]));      \
        } else {                                                        \
            access(env, addr, r->element[LO_IDX ? index :               \
                                         (adjust - index)]);            \
        }                                                               \
    }
#define I(x) (x)
STVE(stvebx, cpu_stb_data, I, u8)
STVE(stvehx, cpu_stw_data, bswap16, u16)
STVE(stvewx, cpu_stl_data, bswap32, u32)
#undef I
#undef STVE

#undef HI_IDX
#undef LO_IDX

/*****************************************************************************/
/* Softmmu support */
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "exec/softmmu_template.h"

#define SHIFT 1
#include "exec/softmmu_template.h"

#define SHIFT 2
#include "exec/softmmu_template.h"

#define SHIFT 3
#include "exec/softmmu_template.h"

/* Try to fill the TLB and raise an exception on error.  If retaddr is
 * NULL, the function was called from C code (i.e. not from generated
 * code or from helper.c).
 */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUPPCState *env, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    int ret;

    ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            cpu_restore_state(env, retaddr);
        }
        helper_raise_exception_err(env, env->exception_index, env->error_code);
    }
}
#endif /* !CONFIG_USER_ONLY */