/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%rp", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
    "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
    "%r24", "%r25", "%r26", "%dp", "%ret0", "%ret1", "%sp", "%r31",
};
#endif

/* This is an 8 byte temp slot in the stack frame. */
#define STACK_TEMP_OFS -16

#ifndef GUEST_BASE
#define GUEST_BASE 0
#endif

#ifdef CONFIG_USE_GUEST_BASE
#define TCG_GUEST_BASE_REG TCG_REG_R16
#else
#define TCG_GUEST_BASE_REG TCG_REG_R0
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,

    TCG_REG_R17,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,

    TCG_REG_R26,
    TCG_REG_R25,
    TCG_REG_R24,
    TCG_REG_R23,

    TCG_REG_RET0,
    TCG_REG_RET1,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R26,
    TCG_REG_R25,
    TCG_REG_R24,
    TCG_REG_R23,
};

static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_RET0,
    TCG_REG_RET1,
};

/* True iff val fits a signed field of width BITS. */
static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
{
    return (val << ((sizeof(tcg_target_long) * 8 - bits))
            >> (sizeof(tcg_target_long) * 8 - bits)) == val;
}

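/* For example, check_fit_tl(-0x400, 11) is true while
   check_fit_tl(0x400, 11) is false: shifting the value up to the sign
   bit and arithmetically back down reproduces it only when no
   significant bits were lost. */
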
/* True iff depi can be used to compute (reg | MASK).
   Accept a bit pattern like:
      0....01....1
      1....10....0
      0..01..10..0
   Copied from gcc sources. */
static inline int or_mask_p(tcg_target_ulong mask)
{
    mask += mask & -mask;
    return (mask & (mask - 1)) == 0;
}

/* True iff depi or extru can be used to compute (reg & mask).
   Accept a bit pattern like these:
      0....01....1
      1....10....0
      1..10..01..1
   Copied from gcc sources. */
static inline int and_mask_p(tcg_target_ulong mask)
{
    return or_mask_p(~mask);
}

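/* Worked example for or_mask_p: for mask = 0x38 (0...0111000),
   mask & -mask isolates the lowest set bit (0x08); adding it carries
   across the contiguous run, giving 0x40.  The test
   (mask & (mask - 1)) == 0 then succeeds exactly when the ones formed a
   single run (possibly wrapping off the top for the 1...10...0
   patterns). */
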
static int low_sign_ext(int val, int len)
{
    return (((val << 1) & ~(-1u << len)) | ((val >> (len - 1)) & 1));
}

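/* PA-RISC immediates are "low sign extended": the field's sign bit is
   stored in its least-significant instruction bit.  E.g. for len = 5,
   low_sign_ext(-1, 5) yields 0x1f -- four magnitude bits plus the sign
   rotated down to bit 0. */
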
static int reassemble_12(int as12)
{
    return (((as12 & 0x800) >> 11) |
            ((as12 & 0x400) >> 8) |
            ((as12 & 0x3ff) << 3));
}

static int reassemble_17(int as17)
{
    return (((as17 & 0x10000) >> 16) |
            ((as17 & 0x0f800) << 5) |
            ((as17 & 0x00400) >> 8) |
            ((as17 & 0x003ff) << 3));
}

static int reassemble_21(int as21)
{
    return (((as21 & 0x100000) >> 20) |
            ((as21 & 0x0ffe00) >> 8) |
            ((as21 & 0x000180) << 7) |
            ((as21 & 0x00007c) << 14) |
            ((as21 & 0x000003) << 12));
}

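/* These helpers scatter a contiguous immediate into the permuted bit
   positions of the PA-RISC assemble_12/17/21 instruction fields (the
   sign bit lands in the field's low instruction bit), so the result can
   be OR'd directly into a branch or LDIL opcode template. */
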
/* ??? Bizarrely, there is no PCREL12F relocation type.  I guess all
   such relocations are simply fully handled by the assembler. */
#define R_PARISC_PCREL12F R_PARISC_NONE

static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    uint32_t *insn_ptr = (uint32_t *)code_ptr;
    uint32_t insn = *insn_ptr;
    tcg_target_long pcrel;

    value += addend;
    pcrel = (value - ((tcg_target_long)code_ptr + 8)) >> 2;

    switch (type) {
    case R_PARISC_PCREL12F:
        assert(check_fit_tl(pcrel, 12));
        /* ??? We assume all patches are forward.  See tcg_out_brcond
           re setting the NUL bit on the branch and eliding the nop. */
        assert(pcrel >= 0);
        insn &= ~0x1ffdu;
        insn |= reassemble_12(pcrel);
        break;
    case R_PARISC_PCREL17F:
        assert(check_fit_tl(pcrel, 17));
        insn &= ~0x1f1ffdu;
        insn |= reassemble_17(pcrel);
        break;
    default:
        tcg_abort();
    }

    *insn_ptr = insn;
}

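/* Branch displacements are measured from the branch instruction's
   address + 8 (PA-RISC's architected two-instruction fetch-ahead) and
   are counted in 4-byte words, hence the "+ 8" and ">> 2" when forming
   pcrel above. */
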
/* maximum number of registers used for input function arguments */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    return 4;
}

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        break;
    case 'L': /* qemu_ld/st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R26);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R25);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R24);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R23);
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_0;
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S11;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_S5;
        break;
    case 'K':
        ct->ct |= TCG_CT_CONST_MS11;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}

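/* 'L' above excludes %r23-%r26 because the softmmu slow path loads the
   helper-call arguments into them while the qemu_ld/st operands must
   stay live. */
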
/* test if a constant matches the constraint */
static int tcg_target_const_match(tcg_target_long val,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if (ct & TCG_CT_CONST_0) {
        return val == 0;
    } else if (ct & TCG_CT_CONST_S5) {
        return check_fit_tl(val, 5);
    } else if (ct & TCG_CT_CONST_S11) {
        return check_fit_tl(val, 11);
    } else if (ct & TCG_CT_CONST_MS11) {
        return check_fit_tl(-val, 11);
    }
    return 0;
}

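/* The constraint letters mirror instruction immediate fields: 'J'
   (signed 5-bit) matches COMIB/COMICLR-style compare immediates, 'I'
   (signed 11-bit) matches ADDI/SUBI, and 'K' accepts values whose
   negation fits 11 bits so the constant can be added with ADDI instead
   of subtracted. */
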
#define INSN_OP(x)       ((x) << 26)
#define INSN_EXT3BR(x)   ((x) << 13)
#define INSN_EXT3SH(x)   ((x) << 10)
#define INSN_EXT4(x)     ((x) << 6)
#define INSN_EXT5(x)     (x)
#define INSN_EXT6(x)     ((x) << 6)
#define INSN_EXT7(x)     ((x) << 6)
#define INSN_EXT8A(x)    ((x) << 6)
#define INSN_EXT8B(x)    ((x) << 5)
#define INSN_T(x)        (x)
#define INSN_R1(x)       ((x) << 16)
#define INSN_R2(x)       ((x) << 21)
#define INSN_DEP_LEN(x)  (32 - (x))
#define INSN_SHDEP_CP(x) ((31 - (x)) << 5)
#define INSN_SHDEP_P(x)  ((x) << 5)
#define INSN_COND(x)     ((x) << 13)
#define INSN_IM11(x)     low_sign_ext(x, 11)
#define INSN_IM14(x)     low_sign_ext(x, 14)
#define INSN_IM5(x)      (low_sign_ext(x, 5) << 16)

#define COND_NEVER   0
#define COND_EQ      1
#define COND_LT      2
#define COND_LE      3
#define COND_LTU     4
#define COND_LEU     5
#define COND_SV      6
#define COND_OD      7
#define COND_FALSE   8

#define INSN_ADD     (INSN_OP(0x02) | INSN_EXT6(0x18))
#define INSN_ADDC    (INSN_OP(0x02) | INSN_EXT6(0x1c))
#define INSN_ADDI    (INSN_OP(0x2d))
#define INSN_ADDIL   (INSN_OP(0x0a))
#define INSN_ADDL    (INSN_OP(0x02) | INSN_EXT6(0x28))
#define INSN_AND     (INSN_OP(0x02) | INSN_EXT6(0x08))
#define INSN_ANDCM   (INSN_OP(0x02) | INSN_EXT6(0x00))
#define INSN_COMCLR  (INSN_OP(0x02) | INSN_EXT6(0x22))
#define INSN_COMICLR (INSN_OP(0x24))
#define INSN_DEP     (INSN_OP(0x35) | INSN_EXT3SH(3))
#define INSN_DEPI    (INSN_OP(0x35) | INSN_EXT3SH(7))
#define INSN_EXTRS   (INSN_OP(0x34) | INSN_EXT3SH(7))
#define INSN_EXTRU   (INSN_OP(0x34) | INSN_EXT3SH(6))
#define INSN_LDIL    (INSN_OP(0x08))
#define INSN_LDO     (INSN_OP(0x0d))
#define INSN_MTCTL   (INSN_OP(0x00) | INSN_EXT8B(0xc2))
#define INSN_OR      (INSN_OP(0x02) | INSN_EXT6(0x09))
#define INSN_SHD     (INSN_OP(0x34) | INSN_EXT3SH(2))
#define INSN_SUB     (INSN_OP(0x02) | INSN_EXT6(0x10))
#define INSN_SUBB    (INSN_OP(0x02) | INSN_EXT6(0x14))
#define INSN_SUBI    (INSN_OP(0x25))
#define INSN_VEXTRS  (INSN_OP(0x34) | INSN_EXT3SH(5))
#define INSN_VEXTRU  (INSN_OP(0x34) | INSN_EXT3SH(4))
#define INSN_VSHD    (INSN_OP(0x34) | INSN_EXT3SH(0))
#define INSN_XOR     (INSN_OP(0x02) | INSN_EXT6(0x0a))
#define INSN_ZDEP    (INSN_OP(0x35) | INSN_EXT3SH(2))
#define INSN_ZVDEP   (INSN_OP(0x35) | INSN_EXT3SH(0))

#define INSN_BL      (INSN_OP(0x3a) | INSN_EXT3BR(0))
#define INSN_BL_N    (INSN_OP(0x3a) | INSN_EXT3BR(0) | 2)
#define INSN_BLR     (INSN_OP(0x3a) | INSN_EXT3BR(2))
#define INSN_BV      (INSN_OP(0x3a) | INSN_EXT3BR(6))
#define INSN_BV_N    (INSN_OP(0x3a) | INSN_EXT3BR(6) | 2)
#define INSN_BLE_SR4 (INSN_OP(0x39) | (1 << 13))

#define INSN_LDB     (INSN_OP(0x10))
#define INSN_LDH     (INSN_OP(0x11))
#define INSN_LDW     (INSN_OP(0x12))
#define INSN_LDWM    (INSN_OP(0x13))
#define INSN_FLDDS   (INSN_OP(0x0b) | INSN_EXT4(0) | (1 << 12))

#define INSN_LDBX    (INSN_OP(0x03) | INSN_EXT4(0))
#define INSN_LDHX    (INSN_OP(0x03) | INSN_EXT4(1))
#define INSN_LDWX    (INSN_OP(0x03) | INSN_EXT4(2))

#define INSN_STB     (INSN_OP(0x18))
#define INSN_STH     (INSN_OP(0x19))
#define INSN_STW     (INSN_OP(0x1a))
#define INSN_STWM    (INSN_OP(0x1b))
#define INSN_FSTDS   (INSN_OP(0x0b) | INSN_EXT4(8) | (1 << 12))

#define INSN_COMBT   (INSN_OP(0x20))
#define INSN_COMBF   (INSN_OP(0x22))
#define INSN_COMIBT  (INSN_OP(0x21))
#define INSN_COMIBF  (INSN_OP(0x23))

/* supplied by libgcc */
extern void *__canonicalize_funcptr_for_compare(void *);

static void tcg_out_mov(TCGContext *s, int ret, int arg)
{
    /* PA1.1 defines COPY as OR r,0,t; PA2.0 defines COPY as LDO 0(r),t
       but hppa-dis.c is unaware of this definition */
    if (ret != arg) {
        tcg_out32(s, INSN_OR | INSN_T(ret) | INSN_R1(arg)
                  | INSN_R2(TCG_REG_R0));
    }
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         int ret, tcg_target_long arg)
{
    if (check_fit_tl(arg, 14)) {
        tcg_out32(s, INSN_LDO | INSN_R1(ret)
                  | INSN_R2(TCG_REG_R0) | INSN_IM14(arg));
    } else {
        uint32_t hi, lo;
        hi = arg >> 11;
        lo = arg & 0x7ff;

        tcg_out32(s, INSN_LDIL | INSN_R2(ret) | reassemble_21(hi));
        if (lo) {
            tcg_out32(s, INSN_LDO | INSN_R1(ret)
                      | INSN_R2(ret) | INSN_IM14(lo));
        }
    }
}

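/* tcg_out_movi splits constants too wide for LDO's 14-bit immediate the
   way the assembler's L%/R% operators do: LDIL deposits the high 21
   bits, then an LDO adds the remaining low 11 bits. */
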
static void tcg_out_ldst(TCGContext *s, int ret, int addr,
                         tcg_target_long offset, int op)
{
    if (!check_fit_tl(offset, 14)) {
        uint32_t hi, lo, op;

        hi = offset >> 11;
        lo = offset & 0x7ff;

        if (addr == TCG_REG_R0) {
            op = INSN_LDIL | INSN_R2(TCG_REG_R1);
        } else {
            op = INSN_ADDIL | INSN_R2(addr);
        }
        tcg_out32(s, op | reassemble_21(hi));

        addr = TCG_REG_R1;
        offset = lo;
    }

    if (ret != addr || offset != 0 || op != INSN_LDO) {
        tcg_out32(s, op | INSN_R1(ret) | INSN_R2(addr) | INSN_IM14(offset));
    }
}

/* This function is required by tcg.c. */
static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
                              int arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, INSN_LDW);
}

/* This function is required by tcg.c. */
static inline void tcg_out_st(TCGContext *s, TCGType type, int ret,
                              int arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, INSN_STW);
}

static void tcg_out_ldst_index(TCGContext *s, int data,
                               int base, int index, int op)
{
    tcg_out32(s, op | INSN_T(data) | INSN_R1(index) | INSN_R2(base));
}

static inline void tcg_out_addi2(TCGContext *s, int ret, int arg1,
                                 tcg_target_long val)
{
    tcg_out_ldst(s, ret, arg1, val, INSN_LDO);
}

/* This function is required by tcg.c. */
static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    tcg_out_addi2(s, reg, reg, val);
}

static inline void tcg_out_arith(TCGContext *s, int t, int r1, int r2, int op)
{
    tcg_out32(s, op | INSN_T(t) | INSN_R1(r1) | INSN_R2(r2));
}

static inline void tcg_out_arithi(TCGContext *s, int t, int r1,
                                  tcg_target_long val, int op)
{
    assert(check_fit_tl(val, 11));
    tcg_out32(s, op | INSN_R1(t) | INSN_R2(r1) | INSN_IM11(val));
}

static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out_arith(s, TCG_REG_R0, TCG_REG_R0, TCG_REG_R0, INSN_OR);
}

static inline void tcg_out_mtctl_sar(TCGContext *s, int arg)
{
    tcg_out32(s, INSN_MTCTL | INSN_R2(11) | INSN_R1(arg));
}

/* Extract LEN bits at position OFS from ARG and place in RET.
   Note that here the bit ordering is reversed from the PA-RISC
   standard, such that the right-most bit is 0. */
static inline void tcg_out_extr(TCGContext *s, int ret, int arg,
                                unsigned ofs, unsigned len, int sign)
{
    assert(ofs < 32 && len <= 32 - ofs);
    tcg_out32(s, (sign ? INSN_EXTRS : INSN_EXTRU)
              | INSN_R1(ret) | INSN_R2(arg)
              | INSN_SHDEP_P(31 - ofs) | INSN_DEP_LEN(len));
}

/* Likewise with OFS interpreted little-endian. */
static inline void tcg_out_dep(TCGContext *s, int ret, int arg,
                               unsigned ofs, unsigned len)
{
    assert(ofs < 32 && len <= 32 - ofs);
    tcg_out32(s, INSN_DEP | INSN_R2(ret) | INSN_R1(arg)
              | INSN_SHDEP_CP(31 - ofs) | INSN_DEP_LEN(len));
}

static inline void tcg_out_shd(TCGContext *s, int ret, int hi, int lo,
                               unsigned count)
{
    assert(count < 32);
    tcg_out32(s, INSN_SHD | INSN_R1(hi) | INSN_R2(lo) | INSN_T(ret)
              | INSN_SHDEP_CP(count));
}

static void tcg_out_vshd(TCGContext *s, int ret, int hi, int lo, int creg)
{
    tcg_out_mtctl_sar(s, creg);
    tcg_out32(s, INSN_VSHD | INSN_T(ret) | INSN_R1(hi) | INSN_R2(lo));
}

static void tcg_out_ori(TCGContext *s, int ret, int arg, tcg_target_ulong m)
{
    if (m == 0) {
        tcg_out_mov(s, ret, arg);
    } else if (m == -1) {
        tcg_out_movi(s, TCG_TYPE_I32, ret, -1);
    } else if (or_mask_p(m)) {
        int bs0, bs1;

        for (bs0 = 0; bs0 < 32; bs0++) {
            if ((m & (1u << bs0)) != 0) {
                break;
            }
        }
        for (bs1 = bs0; bs1 < 32; bs1++) {
            if ((m & (1u << bs1)) == 0) {
                break;
            }
        }
        assert(bs1 == 32 || (1ul << bs1) > m);

        tcg_out_mov(s, ret, arg);
        tcg_out32(s, INSN_DEPI | INSN_R2(ret) | INSN_IM5(-1)
                  | INSN_SHDEP_CP(31 - bs0) | INSN_DEP_LEN(bs1 - bs0));
    } else {
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R1, m);
        tcg_out_arith(s, ret, arg, TCG_REG_R1, INSN_OR);
    }
}

static void tcg_out_andi(TCGContext *s, int ret, int arg, tcg_target_ulong m)
{
    if (m == 0) {
        tcg_out_mov(s, ret, TCG_REG_R0);
    } else if (m == -1) {
        tcg_out_mov(s, ret, arg);
    } else if (and_mask_p(m)) {
        int ls0, ls1, ms0;

        for (ls0 = 0; ls0 < 32; ls0++) {
            if ((m & (1u << ls0)) == 0) {
                break;
            }
        }
        for (ls1 = ls0; ls1 < 32; ls1++) {
            if ((m & (1u << ls1)) != 0) {
                break;
            }
        }
        for (ms0 = ls1; ms0 < 32; ms0++) {
            if ((m & (1u << ms0)) == 0) {
                break;
            }
        }
        assert(ms0 == 32);

        if (ls1 == 32) {
            tcg_out_extr(s, ret, arg, 0, ls0, 0);
        } else {
            tcg_out_mov(s, ret, arg);
            tcg_out32(s, INSN_DEPI | INSN_R2(ret) | INSN_IM5(0)
                      | INSN_SHDEP_CP(31 - ls0) | INSN_DEP_LEN(ls1 - ls0));
        }
    } else {
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R1, m);
        tcg_out_arith(s, ret, arg, TCG_REG_R1, INSN_AND);
    }
}

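/* Example for tcg_out_andi above: m = 0xff0000ff clears the contiguous
   run of bits [8,24); ls0 = 8, ls1 = 24, ms0 = 32, so one DEPI of zeros
   over that run (after copying arg) performs the AND.  A right-justified
   mask such as 0xff (ls1 == 32) is handled by a single EXTRU instead. */
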
static inline void tcg_out_ext8s(TCGContext *s, int ret, int arg)
{
    tcg_out_extr(s, ret, arg, 0, 8, 1);
}

static inline void tcg_out_ext16s(TCGContext *s, int ret, int arg)
{
    tcg_out_extr(s, ret, arg, 0, 16, 1);
}

static void tcg_out_shli(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out32(s, INSN_ZDEP | INSN_R2(ret) | INSN_R1(arg)
              | INSN_SHDEP_CP(31 - count) | INSN_DEP_LEN(32 - count));
}

static void tcg_out_shl(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
    tcg_out_mtctl_sar(s, TCG_REG_R20);
    tcg_out32(s, INSN_ZVDEP | INSN_R2(ret) | INSN_R1(arg) | INSN_DEP_LEN(32));
}

static void tcg_out_shri(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_extr(s, ret, arg, count, 32 - count, 0);
}

static void tcg_out_shr(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_vshd(s, ret, TCG_REG_R0, arg, creg);
}

static void tcg_out_sari(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_extr(s, ret, arg, count, 32 - count, 1);
}

static void tcg_out_sar(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
    tcg_out_mtctl_sar(s, TCG_REG_R20);
    tcg_out32(s, INSN_VEXTRS | INSN_R1(ret) | INSN_R2(arg) | INSN_DEP_LEN(32));
}

static void tcg_out_rotli(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_shd(s, ret, arg, arg, 32 - count);
}

static void tcg_out_rotl(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 32, INSN_SUBI);
    tcg_out_vshd(s, ret, arg, arg, TCG_REG_R20);
}

static void tcg_out_rotri(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_shd(s, ret, arg, arg, count);
}

static void tcg_out_rotr(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_vshd(s, ret, arg, arg, creg);
}

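/* Rotates come for free from the double-word shifts: SHD/VSHD with both
   inputs naming the same register is a 32-bit rotate right by the fixed
   or %sar-supplied count, and a left rotate is a right rotate by
   (32 - count). */
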
static void tcg_out_bswap16(TCGContext *s, int ret, int arg, int sign)
{
    if (ret != arg) {
        tcg_out_mov(s, ret, arg);               /* arg =  xxAB */
    }
    tcg_out_dep(s, ret, ret, 16, 8);            /* ret =  xBAB */
    tcg_out_extr(s, ret, ret, 8, 16, sign);     /* ret =  ..BA */
}

static void tcg_out_bswap32(TCGContext *s, int ret, int arg, int temp)
{
                                                /* arg =  ABCD */
    tcg_out_rotri(s, temp, arg, 16);            /* temp = CDAB */
    tcg_out_dep(s, temp, temp, 16, 8);          /* temp = CBAB */
    tcg_out_shd(s, ret, arg, temp, 8);          /* ret =  DCBA */
}

static void tcg_out_call(TCGContext *s, void *func)
{
    tcg_target_long val, hi, lo, disp;

    val = (uint32_t)__canonicalize_funcptr_for_compare(func);
    disp = (val - ((tcg_target_long)s->code_ptr + 8)) >> 2;

    if (check_fit_tl(disp, 17)) {
        tcg_out32(s, INSN_BL_N | INSN_R2(TCG_REG_RP) | reassemble_17(disp));
    } else {
        hi = val >> 11;
        lo = val & 0x7ff;

        tcg_out32(s, INSN_LDIL | INSN_R2(TCG_REG_R20) | reassemble_21(hi));
        tcg_out32(s, INSN_BLE_SR4 | INSN_R2(TCG_REG_R20)
                  | reassemble_17(lo >> 2));
        tcg_out_mov(s, TCG_REG_RP, TCG_REG_R31);
    }
}

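/* Out-of-range calls above materialize the target's high bits in %r20
   with LDIL and branch with an inter-space BLE via %sr4; BLE leaves the
   return link in %r31, and the copy to %rp executes in the branch's
   delay slot. */
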
static void tcg_out_xmpyu(TCGContext *s, int retl, int reth,
                          int arg1, int arg2)
{
    /* Store both words into the stack for copy to the FPU. */
    tcg_out_ldst(s, arg1, TCG_REG_SP, STACK_TEMP_OFS, INSN_STW);
    tcg_out_ldst(s, arg2, TCG_REG_SP, STACK_TEMP_OFS + 4, INSN_STW);

    /* Load both words into the FPU at the same time.  We get away
       with this because we can address the left and right half of the
       FPU registers individually once loaded. */
    /* fldds stack_temp(sp),fr22 */
    tcg_out32(s, INSN_FLDDS | INSN_R2(TCG_REG_SP)
              | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));

    /* xmpyu fr22r,fr22,fr22 */
    tcg_out32(s, 0x3ad64796);

    /* Store the 64-bit result back into the stack. */
    /* fstds stack_temp(sp),fr22 */
    tcg_out32(s, INSN_FSTDS | INSN_R2(TCG_REG_SP)
              | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));

    /* Load the pieces of the result that the caller requested. */
    if (reth) {
        tcg_out_ldst(s, reth, TCG_REG_SP, STACK_TEMP_OFS, INSN_LDW);
    }
    if (retl) {
        tcg_out_ldst(s, retl, TCG_REG_SP, STACK_TEMP_OFS + 4, INSN_LDW);
    }
}

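/* PA-RISC has no general-register integer multiply, so 32x32->64
   multiplication is bounced through the FPU above: both operands go to
   the stack temp, are loaded as one double, and XMPYU multiplies the
   two unsigned word halves of fr22 in place. */
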
static void tcg_out_add2(TCGContext *s, int destl, int desth,
                         int al, int ah, int bl, int bh, int blconst)
{
    int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);

    if (blconst) {
        tcg_out_arithi(s, tmp, al, bl, INSN_ADDI);
    } else {
        tcg_out_arith(s, tmp, al, bl, INSN_ADD);
    }
    tcg_out_arith(s, desth, ah, bh, INSN_ADDC);

    tcg_out_mov(s, destl, tmp);
}

static void tcg_out_sub2(TCGContext *s, int destl, int desth, int al, int ah,
                         int bl, int bh, int alconst, int blconst)
{
    int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);

    if (alconst) {
        if (blconst) {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R20, bl);
            bl = TCG_REG_R20;
        }
        tcg_out_arithi(s, tmp, bl, al, INSN_SUBI);
    } else if (blconst) {
        tcg_out_arithi(s, tmp, al, -bl, INSN_ADDI);
    } else {
        tcg_out_arith(s, tmp, al, bl, INSN_SUB);
    }
    tcg_out_arith(s, desth, ah, bh, INSN_SUBB);

    tcg_out_mov(s, destl, tmp);
}

static void tcg_out_branch(TCGContext *s, int label_index, int nul)
{
    TCGLabel *l = &s->labels[label_index];
    uint32_t op = nul ? INSN_BL_N : INSN_BL;

    if (l->has_value) {
        tcg_target_long val = l->u.value;

        val -= (tcg_target_long)s->code_ptr + 8;
        val >>= 2;
        assert(check_fit_tl(val, 17));

        tcg_out32(s, op | reassemble_17(val));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL17F, label_index, 0);
        tcg_out32(s, op);
    }
}

static const uint8_t tcg_cond_to_cmp_cond[10] =
{
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_EQ | COND_FALSE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_LT | COND_FALSE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_LE | COND_FALSE,
    [TCG_COND_LTU] = COND_LTU,
    [TCG_COND_GEU] = COND_LTU | COND_FALSE,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_LEU | COND_FALSE,
};

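/* PA-RISC compares encode only EQ/LT/LE and their unsigned variants;
   the other half of the TCG conditions is obtained by negating the
   test, recorded here as COND_FALSE and realized below either by
   picking the COMBF/COMIBF opcode or by setting the compare-and-clear
   'f' bit. */
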
static void tcg_out_brcond(TCGContext *s, int cond, TCGArg c1,
                           TCGArg c2, int c2const, int label_index)
{
    TCGLabel *l = &s->labels[label_index];
    int op, pacond;

    /* Note that COMIB operates as if the immediate is the first
       operand.  We model brcond with the immediate in the second
       to better match what targets are likely to give us.  For
       consistency, model COMB with reversed operands as well. */
    pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];

    if (c2const) {
        op = (pacond & COND_FALSE ? INSN_COMIBF : INSN_COMIBT);
        op |= INSN_IM5(c2);
    } else {
        op = (pacond & COND_FALSE ? INSN_COMBF : INSN_COMBT);
        op |= INSN_R1(c2);
    }
    op |= INSN_R2(c1);
    op |= INSN_COND(pacond & 7);

    if (l->has_value) {
        tcg_target_long val = l->u.value;

        val -= (tcg_target_long)s->code_ptr + 8;
        val >>= 2;
        assert(check_fit_tl(val, 12));

        /* ??? Assume that all branches to defined labels are backward.
           Which means that if the nul bit is set, the delay slot is
           executed if the branch is taken, and not executed in fallthru. */
        tcg_out32(s, op | reassemble_12(val));
        tcg_out_nop(s);
    } else {
        tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL12F, label_index, 0);
        /* ??? Assume that all branches to undefined labels are forward.
           Which means that if the nul bit is set, the delay slot is
           not executed if the branch is taken, which is what we want. */
        tcg_out32(s, op | 2);
    }
}

static void tcg_out_comclr(TCGContext *s, int cond, TCGArg ret,
                           TCGArg c1, TCGArg c2, int c2const)
{
    int op, pacond;

    /* Note that COMICLR operates as if the immediate is the first
       operand.  We model setcond with the immediate in the second
       to better match what targets are likely to give us.  For
       consistency, model COMCLR with reversed operands as well. */
    pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];

    if (c2const) {
        op = INSN_COMICLR | INSN_R2(c1) | INSN_R1(ret) | INSN_IM11(c2);
    } else {
        op = INSN_COMCLR | INSN_R2(c1) | INSN_R1(c2) | INSN_T(ret);
    }
    op |= INSN_COND(pacond & 7);
    op |= pacond & COND_FALSE ? 1 << 12 : 0;

    tcg_out32(s, op);
}

static void tcg_out_brcond2(TCGContext *s, int cond, TCGArg al, TCGArg ah,
                            TCGArg bl, int blconst, TCGArg bh, int bhconst,
                            int label_index)
{
    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        tcg_out_comclr(s, tcg_invert_cond(cond), TCG_REG_R0, al, bl, blconst);
        tcg_out_brcond(s, cond, ah, bh, bhconst, label_index);
        break;

    default:
        tcg_out_brcond(s, cond, ah, bh, bhconst, label_index);
        tcg_out_comclr(s, TCG_COND_NE, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_brcond(s, tcg_unsigned_cond(cond),
                       al, bl, blconst, label_index);
        break;
    }
}

static void tcg_out_setcond(TCGContext *s, int cond, TCGArg ret,
                            TCGArg c1, TCGArg c2, int c2const)
{
    tcg_out_comclr(s, tcg_invert_cond(cond), ret, c1, c2, c2const);
    tcg_out_movi(s, TCG_TYPE_I32, ret, 1);
}

static void tcg_out_setcond2(TCGContext *s, int cond, TCGArg ret,
                             TCGArg al, TCGArg ah, TCGArg bl, int blconst,
                             TCGArg bh, int bhconst)
{
    int scratch = TCG_REG_R20;

    if (ret != al && ret != ah
        && (blconst || ret != bl)
        && (bhconst || ret != bh)) {
        scratch = ret;
    }

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        tcg_out_setcond(s, cond, scratch, al, bl, blconst);
        tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_movi(s, TCG_TYPE_I32, scratch, cond == TCG_COND_NE);
        break;

    default:
        tcg_out_setcond(s, tcg_unsigned_cond(cond), scratch, al, bl, blconst);
        tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_movi(s, TCG_TYPE_I32, scratch, 0);
        tcg_out_comclr(s, cond, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_movi(s, TCG_TYPE_I32, scratch, 1);
        break;
    }

    tcg_out_mov(s, ret, scratch);
}

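/* COMCLR is the branch-free workhorse here: it compares, clears the
   target register, and nullifies the following instruction when its
   condition holds, so comclr with the inverted condition followed by
   "ldi 1" computes a boolean, and chains of comclr-guarded movi
   instructions build the double-word variants. */
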
#if defined(CONFIG_SOFTMMU)
#include "../../softmmu_defs.h"

static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};

/* Load and compare a TLB entry, and branch if TLB miss.  OFFSET is set to
   the offset of the first ADDR_READ or ADDR_WRITE member of the appropriate
   TLB for the memory index.  The return value is the offset from ENV
   contained in R1 afterward (to be used when loading ADDEND); if the
   return value is 0, R1 is not used. */

static int tcg_out_tlb_read(TCGContext *s, int r0, int r1, int addrlo,
                            int addrhi, int s_bits, int lab_miss, int offset)
{
    int ret;

    /* Extracting the index into the TLB.  The "normal C operation" is
         r1 = addr_reg >> TARGET_PAGE_BITS;
         r1 &= CPU_TLB_SIZE - 1;
         r1 <<= CPU_TLB_ENTRY_BITS;
       What this does is extract CPU_TLB_BITS beginning at TARGET_PAGE_BITS
       and place them at CPU_TLB_ENTRY_BITS.  We can combine the first two
       operations with an EXTRU.  Unfortunately, the current value of
       CPU_TLB_ENTRY_BITS is > 3, so we can't merge that shift with the
       add that follows. */
    tcg_out_extr(s, r1, addrlo, TARGET_PAGE_BITS, CPU_TLB_BITS, 0);
    tcg_out_andi(s, r0, addrlo, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
    tcg_out_shli(s, r1, r1, CPU_TLB_ENTRY_BITS);
    tcg_out_arith(s, r1, r1, TCG_AREG0, INSN_ADDL);

    /* Make sure that both the addr_{read,write} and addend can be
       read with a 14-bit offset from the same base register. */
    if (check_fit_tl(offset + CPU_TLB_SIZE, 14)) {
        ret = 0;
    } else {
        ret = (offset + 0x400) & ~0x7ff;
        offset = ret - offset;
        tcg_out_addi2(s, TCG_REG_R1, r1, ret);
        r1 = TCG_REG_R1;
    }

    /* Load the entry from the computed slot. */
    if (TARGET_LONG_BITS == 64) {
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R23, r1, offset);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset + 4);
    } else {
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset);
    }

    /* If not equal, jump to lab_miss. */
    if (TARGET_LONG_BITS == 64) {
        tcg_out_brcond2(s, TCG_COND_NE, TCG_REG_R20, TCG_REG_R23,
                        r0, 0, addrhi, 0, lab_miss);
    } else {
        tcg_out_brcond(s, TCG_COND_NE, TCG_REG_R20, r0, 0, lab_miss);
    }

    return ret;
}
#endif

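/* When the TLB table offset is too large for a 14-bit displacement,
   tcg_out_tlb_read rounds it to a multiple of 0x800, folds that amount
   into %r1, and returns it; callers compensate by subtracting the
   returned value from the addend offset, keeping both TLB words
   reachable from one base register. */
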
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
{
    int addr_reg, addr_reg2;
    int data_reg, data_reg2;
    int r0, r1, mem_index, s_bits, bswap;
    tcg_target_long offset;
#if defined(CONFIG_SOFTMMU)
    int lab1, lab2, argreg;
#endif

    data_reg = *args++;
    data_reg2 = (opc == 3 ? *args++ : TCG_REG_R0);
    addr_reg = *args++;
    addr_reg2 = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
    mem_index = *args;
    s_bits = opc & 3;

    r0 = TCG_REG_R26;
    r1 = TCG_REG_R25;

#if defined(CONFIG_SOFTMMU)
    lab1 = gen_new_label();
    lab2 = gen_new_label();

    offset = tcg_out_tlb_read(s, r0, r1, addr_reg, addr_reg2, s_bits, lab1,
                              offsetof(CPUState,
                                       tlb_table[mem_index][0].addr_read));

    /* TLB Hit.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, (offset ? TCG_REG_R1 : r1),
               offsetof(CPUState, tlb_table[mem_index][0].addend) - offset);

    tcg_out_arith(s, r0, addr_reg, TCG_REG_R20, INSN_ADDL);
    offset = TCG_REG_R0;
#else
    r0 = addr_reg;
    offset = GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_R0;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 0;
#else
    bswap = 1;
#endif
    switch (opc) {
    case 0:
        tcg_out_ldst_index(s, data_reg, r0, offset, INSN_LDBX);
        break;
    case 0 | 4:
        tcg_out_ldst_index(s, data_reg, r0, offset, INSN_LDBX);
        tcg_out_ext8s(s, data_reg, data_reg);
        break;
    case 1:
        tcg_out_ldst_index(s, data_reg, r0, offset, INSN_LDHX);
        if (bswap) {
            tcg_out_bswap16(s, data_reg, data_reg, 0);
        }
        break;
    case 1 | 4:
        tcg_out_ldst_index(s, data_reg, r0, offset, INSN_LDHX);
        if (bswap) {
            tcg_out_bswap16(s, data_reg, data_reg, 1);
        } else {
            tcg_out_ext16s(s, data_reg, data_reg);
        }
        break;
    case 2:
        tcg_out_ldst_index(s, data_reg, r0, offset, INSN_LDWX);
        if (bswap) {
            tcg_out_bswap32(s, data_reg, data_reg, TCG_REG_R20);
        }
        break;
    case 3:
        if (bswap) {
            int t = data_reg2;
            data_reg2 = data_reg;
            data_reg = t;
        }
        if (offset == TCG_REG_R0) {
            /* Make sure not to clobber the base register. */
            if (data_reg2 == r0) {
                tcg_out_ldst(s, data_reg, r0, 4, INSN_LDW);
                tcg_out_ldst(s, data_reg2, r0, 0, INSN_LDW);
            } else {
                tcg_out_ldst(s, data_reg2, r0, 0, INSN_LDW);
                tcg_out_ldst(s, data_reg, r0, 4, INSN_LDW);
            }
        } else {
            tcg_out_addi2(s, TCG_REG_R20, r0, 4);
            tcg_out_ldst_index(s, data_reg2, r0, offset, INSN_LDWX);
            tcg_out_ldst_index(s, data_reg, TCG_REG_R20, offset, INSN_LDWX);
        }
        if (bswap) {
            tcg_out_bswap32(s, data_reg, data_reg, TCG_REG_R20);
            tcg_out_bswap32(s, data_reg2, data_reg2, TCG_REG_R20);
        }
        break;
    default:
        tcg_abort();
    }

#if defined(CONFIG_SOFTMMU)
    tcg_out_branch(s, lab2, 1);

    /* TLB Miss.  */
    /* label1: */
    tcg_out_label(s, lab1, (tcg_target_long)s->code_ptr);

    argreg = TCG_REG_R26;
    tcg_out_mov(s, argreg--, addr_reg);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, argreg--, addr_reg2);
    }
    tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);

    tcg_out_call(s, qemu_ld_helpers[s_bits]);

    switch (opc) {
    case 0:
        tcg_out_andi(s, data_reg, TCG_REG_RET0, 0xff);
        break;
    case 0 | 4:
        tcg_out_ext8s(s, data_reg, TCG_REG_RET0);
        break;
    case 1:
        tcg_out_andi(s, data_reg, TCG_REG_RET0, 0xffff);
        break;
    case 1 | 4:
        tcg_out_ext16s(s, data_reg, TCG_REG_RET0);
        break;
    case 2:
    case 2 | 4:
        tcg_out_mov(s, data_reg, TCG_REG_RET0);
        break;
    case 3:
        tcg_out_mov(s, data_reg, TCG_REG_RET0);
        tcg_out_mov(s, data_reg2, TCG_REG_RET1);
        break;
    default:
        tcg_abort();
    }

    /* label2: */
    tcg_out_label(s, lab2, (tcg_target_long)s->code_ptr);
#endif
}

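/* Helper-call arguments march down from %r26, the first PA-RISC
   argument register, so "argreg--" visits %r26, %r25, %r24 in
   calling-convention order in both the load and store slow paths. */
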
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
{
    int addr_reg, addr_reg2;
    int data_reg, data_reg2;
    int r0, r1, mem_index, s_bits, bswap;
#if defined(CONFIG_SOFTMMU)
    tcg_target_long offset;
    int lab1, lab2, argreg;
#endif

    data_reg = *args++;
    data_reg2 = (opc == 3 ? *args++ : 0);
    addr_reg = *args++;
    addr_reg2 = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    mem_index = *args;
    s_bits = opc;

    r0 = TCG_REG_R26;
    r1 = TCG_REG_R25;

#if defined(CONFIG_SOFTMMU)
    lab1 = gen_new_label();
    lab2 = gen_new_label();

    offset = tcg_out_tlb_read(s, r0, r1, addr_reg, addr_reg2, s_bits, lab1,
                              offsetof(CPUState,
                                       tlb_table[mem_index][0].addr_write));

    /* TLB Hit.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, (offset ? TCG_REG_R1 : r1),
               offsetof(CPUState, tlb_table[mem_index][0].addend) - offset);

    tcg_out_arith(s, r0, addr_reg, TCG_REG_R20, INSN_ADDL);
#else
    /* There are no indexed stores, so if GUEST_BASE is set
       we must do the add explicitly.  Careful to avoid R20,
       which is used for the bswaps to follow. */
    if (GUEST_BASE == 0) {
        r0 = addr_reg;
    } else {
        tcg_out_arith(s, TCG_REG_R31, addr_reg, TCG_GUEST_BASE_REG, INSN_ADDL);
        r0 = TCG_REG_R31;
    }
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 0;
#else
    bswap = 1;
#endif
    switch (opc) {
    case 0:
        tcg_out_ldst(s, data_reg, r0, 0, INSN_STB);
        break;
    case 1:
        if (bswap) {
            tcg_out_bswap16(s, TCG_REG_R20, data_reg, 0);
            data_reg = TCG_REG_R20;
        }
        tcg_out_ldst(s, data_reg, r0, 0, INSN_STH);
        break;
    case 2:
        if (bswap) {
            tcg_out_bswap32(s, TCG_REG_R20, data_reg, TCG_REG_R20);
            data_reg = TCG_REG_R20;
        }
        tcg_out_ldst(s, data_reg, r0, 0, INSN_STW);
        break;
    case 3:
        if (bswap) {
            tcg_out_bswap32(s, TCG_REG_R20, data_reg, TCG_REG_R20);
            tcg_out_bswap32(s, TCG_REG_R23, data_reg2, TCG_REG_R23);
            data_reg2 = TCG_REG_R20;
            data_reg = TCG_REG_R23;
        }
        tcg_out_ldst(s, data_reg2, r0, 0, INSN_STW);
        tcg_out_ldst(s, data_reg, r0, 4, INSN_STW);
        break;
    default:
        tcg_abort();
    }

#if defined(CONFIG_SOFTMMU)
    tcg_out_branch(s, lab2, 1);

    /* TLB Miss.  */
    /* label1: */
    tcg_out_label(s, lab1, (tcg_target_long)s->code_ptr);

    argreg = TCG_REG_R26;
    tcg_out_mov(s, argreg--, addr_reg);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, argreg--, addr_reg2);
    }

    switch (opc) {
    case 0:
        tcg_out_andi(s, argreg--, data_reg, 0xff);
        tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        break;
    case 1:
        tcg_out_andi(s, argreg--, data_reg, 0xffff);
        tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        break;
    case 2:
        tcg_out_mov(s, argreg--, data_reg);
        tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        break;
    case 3:
        /* Because of the alignment required by the 64-bit data argument,
           we will always use R23/R24.  Also, we will always run out of
           argument registers for storing mem_index, so that will have
           to go on the stack. */
        if (mem_index == 0) {
            argreg = TCG_REG_R0;
        } else {
            argreg = TCG_REG_R20;
            tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        }
        tcg_out_mov(s, TCG_REG_R23, data_reg2);
        tcg_out_mov(s, TCG_REG_R24, data_reg);
        tcg_out_st(s, TCG_TYPE_I32, argreg, TCG_REG_SP,
                   TCG_TARGET_CALL_STACK_OFFSET - 4);
        break;
    default:
        tcg_abort();
    }

    tcg_out_call(s, qemu_st_helpers[s_bits]);

    /* label2: */
    tcg_out_label(s, lab2, (tcg_target_long)s->code_ptr);
#endif
}

static void tcg_out_exit_tb(TCGContext *s, TCGArg arg)
{
    if (!check_fit_tl(arg, 14)) {
        uint32_t hi, lo;
        hi = arg & ~0x7ff;
        lo = arg & 0x7ff;
        if (lo) {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, hi);
            tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
            tcg_out_addi(s, TCG_REG_RET0, lo);
            return;
        }
        arg = hi;
    }
    tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, arg);
}

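/* exit_tb above returns through %r18 (pointed at the epilogue by the
   prologue below) and finishes materializing the return value in %ret0
   from the BV delay slot. */
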
static void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
{
    if (s->tb_jmp_offset) {
        /* direct jump method */
        fprintf(stderr, "goto_tb direct\n");
        tcg_abort();
    } else {
        /* indirect jump method */
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, TCG_REG_R0,
                   (tcg_target_long)(s->tb_next + arg));
        tcg_out32(s, INSN_BV_N | INSN_R2(TCG_REG_R20));
    }
    s->tb_next_offset[arg] = s->code_ptr - s->code_buf;
}

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                              const int *const_args)
{
    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_exit_tb(s, args[0]);
        break;
    case INDEX_op_goto_tb:
        tcg_out_goto_tb(s, args[0]);
        break;

    case INDEX_op_call:
        if (const_args[0]) {
            tcg_out_call(s, (void *)args[0]);
        } else {
            /* ??? FIXME: the value in the register in args[0] is almost
               certainly a procedure descriptor, not a code address.  We
               probably need to use the millicode $$dyncall routine. */
            tcg_abort();
        }
        break;

    case INDEX_op_jmp:
        fprintf(stderr, "unimplemented jmp\n");
        tcg_abort();
        break;

    case INDEX_op_br:
        tcg_out_branch(s, args[0], 1);
        break;

    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
        break;

    case INDEX_op_ld8u_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
        tcg_out_ext8s(s, args[0], args[0]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
        tcg_out_ext16s(s, args[0], args[0]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDW);
        break;

    case INDEX_op_st8_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STB);
        break;
    case INDEX_op_st16_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STH);
        break;
    case INDEX_op_st_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STW);
        break;

    case INDEX_op_add_i32:
        if (const_args[2]) {
            tcg_out_addi2(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_ADDL);
        }
        break;

    case INDEX_op_sub_i32:
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1] - args[2]);
            } else {
                /* Recall that SUBI is a reversed subtract. */
                tcg_out_arithi(s, args[0], args[2], args[1], INSN_SUBI);
            }
        } else if (const_args[2]) {
            tcg_out_addi2(s, args[0], args[1], -args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_SUB);
        }
        break;

    case INDEX_op_and_i32:
        if (const_args[2]) {
            tcg_out_andi(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_AND);
        }
        break;

    case INDEX_op_or_i32:
        if (const_args[2]) {
            tcg_out_ori(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_OR);
        }
        break;

    case INDEX_op_xor_i32:
        tcg_out_arith(s, args[0], args[1], args[2], INSN_XOR);
        break;

    case INDEX_op_andc_i32:
        if (const_args[2]) {
            tcg_out_andi(s, args[0], args[1], ~args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_ANDCM);
        }
        break;

    case INDEX_op_shl_i32:
        if (const_args[2]) {
            tcg_out_shli(s, args[0], args[1], args[2]);
        } else {
            tcg_out_shl(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_shr_i32:
        if (const_args[2]) {
            tcg_out_shri(s, args[0], args[1], args[2]);
        } else {
            tcg_out_shr(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_sar_i32:
        if (const_args[2]) {
            tcg_out_sari(s, args[0], args[1], args[2]);
        } else {
            tcg_out_sar(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_rotli(s, args[0], args[1], args[2]);
        } else {
            tcg_out_rotl(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_rotr_i32:
        if (const_args[2]) {
            tcg_out_rotri(s, args[0], args[1], args[2]);
        } else {
            tcg_out_rotr(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_mul_i32:
        tcg_out_xmpyu(s, args[0], TCG_REG_R0, args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_xmpyu(s, args[0], args[1], args[2], args[3]);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, args[0], args[1], 0);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, args[0], args[1], TCG_REG_R20);
        break;

    case INDEX_op_not_i32:
        tcg_out_arithi(s, args[0], args[1], -1, INSN_SUBI);
        break;
    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, args[0], args[1]);
        break;

    /* These three correspond exactly to the fallback implementation.
       But by including them we reduce the number of TCG ops that
       need to be generated, and these opcodes are fairly common. */
    case INDEX_op_neg_i32:
        tcg_out_arith(s, args[0], TCG_REG_R0, args[1], INSN_SUB);
        break;
    case INDEX_op_ext8u_i32:
        tcg_out_andi(s, args[0], args[1], 0xff);
        break;
    case INDEX_op_ext16u_i32:
        tcg_out_andi(s, args[0], args[1], 0xffff);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], args[3]);
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args[4], args[0], args[1],
                        args[2], const_args[2],
                        args[3], const_args[3], args[5]);
        break;

    case INDEX_op_setcond_i32:
        tcg_out_setcond(s, args[3], args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args[5], args[0], args[1], args[2],
                         args[3], const_args[3], args[4], const_args[4]);
        break;

    case INDEX_op_add2_i32:
        tcg_out_add2(s, args[0], args[1], args[2], args[3],
                     args[4], args[5], const_args[4]);
        break;

    case INDEX_op_sub2_i32:
        tcg_out_sub2(s, args[0], args[1], args[2], args[3],
                     args[4], args[5], const_args[2], const_args[4]);
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    default:
        fprintf(stderr, "unknown opcode 0x%x\n", opc);
        tcg_abort();
    }
}

static const TCGTargetOpDef hppa_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },

    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "r" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "rZ", "r" } },
    { INDEX_op_st16_i32, { "rZ", "r" } },
    { INDEX_op_st_i32, { "rZ", "r" } },

    { INDEX_op_add_i32, { "r", "rZ", "ri" } },
    { INDEX_op_sub_i32, { "r", "rI", "ri" } },
    { INDEX_op_and_i32, { "r", "rZ", "ri" } },
    { INDEX_op_or_i32, { "r", "rZ", "ri" } },
    { INDEX_op_xor_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_andc_i32, { "r", "rZ", "ri" } },

    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },
    { INDEX_op_neg_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext8u_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { INDEX_op_brcond_i32, { "rZ", "rJ" } },
    { INDEX_op_brcond2_i32, { "rZ", "rZ", "rJ", "rJ" } },

    { INDEX_op_setcond_i32, { "r", "rZ", "rI" } },
    { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rI", "rI" } },

    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rI", "rZ" } },
    { INDEX_op_sub2_i32, { "r", "r", "rI", "rZ", "rK", "rZ" } },

#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "LZ", "L" } },
    { INDEX_op_qemu_st16, { "LZ", "L" } },
    { INDEX_op_qemu_st32, { "LZ", "L" } },
    { INDEX_op_qemu_st64, { "LZ", "LZ", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },

    { INDEX_op_qemu_st8, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st16, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st32, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st64, { "LZ", "LZ", "L", "L" } },
#endif
    { -1 },
};

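/* 'Z' accepts constant zero, which is free on PA-RISC because %r0 reads
   as zero; the "rZ" pairs above let the register allocator feed literal
   zeros straight into three-operand ALU instructions. */
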
static int tcg_target_callee_save_regs[] = {
    /* R2, the return address register, is saved specially
       in the caller's frame. */
    /* R3, the frame pointer, is not currently modified. */
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,
    /* R17 is the global env, so no need to save. */
    TCG_REG_R18
};

void tcg_target_qemu_prologue(TCGContext *s)
{
    int frame_size, i;

    /* Allocate space for the fixed frame marker. */
    frame_size = -TCG_TARGET_CALL_STACK_OFFSET;
    frame_size += TCG_TARGET_STATIC_CALL_ARGS_SIZE;

    /* Allocate space for the saved registers. */
    frame_size += ARRAY_SIZE(tcg_target_callee_save_regs) * 4;

    /* Align the allocated space. */
    frame_size = ((frame_size + TCG_TARGET_STACK_ALIGN - 1)
                  & -TCG_TARGET_STACK_ALIGN);

    /* The return address is stored in the caller's frame. */
    tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_SP, -20);

    /* Allocate stack frame, saving the first register at the same time. */
    tcg_out_ldst(s, tcg_target_callee_save_regs[0],
                 TCG_REG_SP, frame_size, INSN_STWM);

    /* Save all callee saved registers. */
    for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, -frame_size + i * 4);
    }

    if (GUEST_BASE != 0) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
    }

    /* Jump to TB, and adjust R18 to be the return address. */
    tcg_out32(s, INSN_BLE_SR4 | INSN_R2(TCG_REG_R26));
    tcg_out_mov(s, TCG_REG_R18, TCG_REG_R31);

    /* Restore callee saved registers. */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_SP, -frame_size - 20);
    for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, -frame_size + i * 4);
    }

    /* Deallocate stack frame and return. */
    tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_RP));
    tcg_out_ldst(s, tcg_target_callee_save_regs[0],
                 TCG_REG_SP, -frame_size, INSN_LDWM);
}

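/* STWM stores the first callee-saved register and bumps %sp by the
   frame size in a single instruction; the matching LDWM in the epilogue
   sits in the BV delay slot, restoring the register while deallocating
   the frame. */
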
void tcg_target_init(TCGContext *s)
{
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);

    tcg_regset_clear(tcg_target_call_clobber_regs);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R20);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R21);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R22);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R23);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R24);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R25);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R26);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET1);

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);  /* hardwired to zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1);  /* addil target */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RP);  /* link register */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R3);  /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R18); /* return pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R19); /* clobbered w/o pic */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R20); /* reserved */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_DP);  /* data pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);  /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R31); /* ble link reg */
    if (GUEST_BASE != 0) {
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }

    tcg_add_target_add_op_defs(hppa_op_defs);
}