/* tcg/sparc/tcg-target.c @ 53cd9273 */
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

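/* Names of the SPARC integer registers, indexed by TCG register number. */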
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0",
    "%g1",
    "%g2",
    "%g3",
    "%g4",
    "%g5",
    "%g6",
    "%g7",
    "%o0",
    "%o1",
    "%o2",
    "%o3",
    "%o4",
    "%o5",
    "%o6",
    "%o7",
    "%l0",
    "%l1",
    "%l2",
    "%l3",
    "%l4",
    "%l5",
    "%l6",
    "%l7",
    "%i0",
    "%i1",
    "%i2",
    "%i3",
    "%i4",
    "%i5",
    "%i6",
    "%i7",
};

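/* Register allocation order: the window-local (%l0-%l7) and input (%i0-%i5)
   registers, which the register-window convention preserves across calls. */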
static const int tcg_target_reg_alloc_order[TCG_TARGET_NB_REGS] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,
    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
    TCG_REG_I5,
};

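/* Registers used to pass (%o0-%o5) and return (%o0/%o1) call arguments. */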
static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_O0,
    TCG_REG_O1,
};

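/* Patch a relocation in already-emitted code; only 32-bit absolute
   relocations (R_SPARC_32) are handled. */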
static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value)
{
    switch (type) {
    case R_SPARC_32:
        if (value != (uint32_t)value)
            tcg_abort();
        *(uint32_t *)code_ptr = value;
        break;
    default:
        tcg_abort();
    }
}

/* maximum number of registers used for input function arguments */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    return 6;
}

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'r':
    case 'L': /* qemu_ld/st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S11;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_S13;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}

#define ABS(x) ((x) < 0 ? -(x) : (x))
/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val,
                                         const TCGArgConstraint *arg_ct)
{
    int ct;

    ct = arg_ct->ct;
    if (ct & TCG_CT_CONST)
        return 1;
    else if ((ct & TCG_CT_CONST_S11) && ABS(val) == (ABS(val) & 0x3ff))
        return 1;
    else if ((ct & TCG_CT_CONST_S13) && ABS(val) == (ABS(val) & 0xfff))
        return 1;
    else
        return 0;
}

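/* SPARC instruction field encodings: op/op2/op3/opf opcode selectors and the
   rd/rs1/rs2 register fields.  INSN_IMM13 sets the i bit and encodes a
   13-bit signed immediate. */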
#define INSN_OP(x) ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x) ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)

#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))

#define INSN_COND(x, a) (((x) << 25) | ((a) << 29))

#define ARITH_ADD (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_AND (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_OR (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_XOR (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_ADDX (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_SUBX (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))

#define SHIFT_SLL (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define WRY (INSN_OP(2) | INSN_OP3(0x30))
#define JMPL (INSN_OP(2) | INSN_OP3(0x38))
#define SAVE (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI (INSN_OP(0) | INSN_OP2(0x4))
#define CALL INSN_OP(1)
#define LDUB (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW (INSN_OP(3) | INSN_OP3(0x08))
#define LDX (INSN_OP(3) | INSN_OP3(0x0b))
#define STB (INSN_OP(3) | INSN_OP3(0x05))
#define STH (INSN_OP(3) | INSN_OP3(0x06))
#define STW (INSN_OP(3) | INSN_OP3(0x04))
#define STX (INSN_OP(3) | INSN_OP3(0x0e))

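/* Register-to-register move, encoded as an OR of the source with %g0. */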
static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
{
    tcg_out32(s, ARITH_OR | INSN_RD(ret) | INSN_RS1(arg) |
              INSN_RS2(TCG_REG_G0));
}

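/* Load a constant: a single OR with an immediate when the value is small
   enough for the immediate form, otherwise SETHI for the top bits plus an
   OR of the low 10 bits. */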
static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                int ret, tcg_target_long arg)
{
    if (arg == (arg & 0xfff))
        tcg_out32(s, ARITH_OR | INSN_RD(ret) | INSN_RS1(TCG_REG_G0) |
                  INSN_IMM13(arg));
    else {
        tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
        if (arg & 0x3ff)
            tcg_out32(s, ARITH_OR | INSN_RD(ret) | INSN_RS1(ret) |
                      INSN_IMM13(arg & 0x3ff));
    }
}

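/* Load a 32-bit value from an absolute address: SETHI the high bits of the
   address, then LDUW with the low 10 bits as the offset. */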
static inline void tcg_out_ld_raw(TCGContext *s, int ret,
                                  tcg_target_long arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | (((uint32_t)arg & 0xfffffc00) >> 10));
    tcg_out32(s, LDUW | INSN_RD(ret) | INSN_RS1(ret) |
              INSN_IMM13(arg & 0x3ff));
}

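/* Load or store through a register base plus a small immediate offset;
   larger offsets are not handled yet. */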
static inline void tcg_out_ldst(TCGContext *s, int ret, int addr,
                                int offset, int op)
{
    if (offset == (offset & 0xfff))
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    else
        fprintf(stderr, "unimplemented %s with offset %d\n", __func__, offset);
}

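/* Generic TCG load/store with a full tcg_target_long offset: still stubs. */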
static inline void tcg_out_ld(TCGContext *s, int ret,
                              int arg1, tcg_target_long arg2)
{
    fprintf(stderr, "unimplemented %s\n", __func__);
}

static inline void tcg_out_st(TCGContext *s, int arg,
                              int arg1, tcg_target_long arg2)
{
    fprintf(stderr, "unimplemented %s\n", __func__);
}

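/* Three-operand arithmetic: register/register and register/immediate forms. */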
static inline void tcg_out_arith(TCGContext *s, int rd, int rs1, int rs2,
                                 int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
              INSN_RS2(rs2));
}

static inline void tcg_out_arithi(TCGContext *s, int rd, int rs1, int offset,
                                  int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
              INSN_IMM13(offset));
}

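/* Write the %y register, which supplies the high 32 bits of the dividend
   for the 32-bit divide instructions. */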
static inline void tcg_out_sety(TCGContext *s, tcg_target_long val)
{
    if (val == 0 || val == -1)
        tcg_out32(s, WRY | INSN_IMM13(val));
    else
        fprintf(stderr, "unimplemented sety %ld\n", (long)val);
}

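/* Add a small constant to a register in place. */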
static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val != 0) {
        if (val == (val & 0xfff))
            tcg_out_arithi(s, reg, reg, val, ARITH_ADD);
        else
            fprintf(stderr, "unimplemented addi %ld\n", (long)val);
    }
}

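/* "sethi 0, %g0" is the canonical SPARC nop. */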
static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, SETHI | INSN_RD(TCG_REG_G0) | 0);
}

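/* Emit host code for one TCG opcode.  Many operations are still stubs that
   only print a diagnostic. */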
static inline void tcg_out_op(TCGContext *s, int opc, const TCGArg *args,
                              const int *const_args)
{
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_O0, args[0]);
        tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_O7) |
                  INSN_IMM13(8));
        tcg_out_nop(s);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_I5, args[0]);
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
        } else {
            /* indirect jump method */
            tcg_out_ld_raw(s, TCG_REG_I5, (tcg_target_long)(s->tb_next + args[0]));
        }
        tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I5) |
                  INSN_RS2(TCG_REG_G0));
        tcg_out_nop(s);
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0]) {
            tcg_out32(s, CALL | ((((tcg_target_ulong)args[0]
                                   - (tcg_target_ulong)s->code_ptr) >> 2)
                                 & 0x3fffffff));
            tcg_out_nop(s);
        } else {
            tcg_out_ld_raw(s, TCG_REG_O7, (tcg_target_long)(s->tb_next + args[0]));
            tcg_out32(s, JMPL | INSN_RD(TCG_REG_O7) | INSN_RS1(TCG_REG_O7) |
                      INSN_RS2(TCG_REG_G0));
            tcg_out_nop(s);
        }
        break;
    case INDEX_op_jmp:
        fprintf(stderr, "unimplemented jmp\n");
        break;
    case INDEX_op_br:
        fprintf(stderr, "unimplemented br\n");
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
        break;

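    /* On a pure 64-bit V9 host the same code handles the _i32 and _i64
       variants of an opcode; OP_32_64 expands to both case labels there. */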
#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32:)    \
        glue(glue(case INDEX_op_, x), _i64:)
#else
#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32:)
#endif
    OP_32_64(ld8u);
        tcg_out_ldst(s, args[0], args[1], args[2], LDUB);
        break;
    OP_32_64(ld8s);
        tcg_out_ldst(s, args[0], args[1], args[2], LDSB);
        break;
    OP_32_64(ld16u);
        tcg_out_ldst(s, args[0], args[1], args[2], LDUH);
        break;
    OP_32_64(ld16s);
        tcg_out_ldst(s, args[0], args[1], args[2], LDSH);
        break;
    case INDEX_op_ld_i32:
#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
    case INDEX_op_ld32u_i64:
#endif
        tcg_out_ldst(s, args[0], args[1], args[2], LDUW);
        break;
    OP_32_64(st8);
        tcg_out_ldst(s, args[0], args[1], args[2], STB);
        break;
    OP_32_64(st16);
        tcg_out_ldst(s, args[0], args[1], args[2], STH);
        break;
    case INDEX_op_st_i32:
#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
    case INDEX_op_st32_i64:
#endif
        tcg_out_ldst(s, args[0], args[1], args[2], STW);
        break;
    OP_32_64(add);
        c = ARITH_ADD;
        goto gen_arith32;
    OP_32_64(sub);
        c = ARITH_SUB;
        goto gen_arith32;
    OP_32_64(and);
        c = ARITH_AND;
        goto gen_arith32;
    OP_32_64(or);
        c = ARITH_OR;
        goto gen_arith32;
    OP_32_64(xor);
        c = ARITH_XOR;
        goto gen_arith32;
    case INDEX_op_shl_i32:
        c = SHIFT_SLL;
        goto gen_arith32;
    case INDEX_op_shr_i32:
        c = SHIFT_SRL;
        goto gen_arith32;
    case INDEX_op_sar_i32:
        c = SHIFT_SRA;
        goto gen_arith32;
    case INDEX_op_mul_i32:
        c = ARITH_UMUL;
        goto gen_arith32;
    case INDEX_op_div2_i32:
#if defined(__sparc_v9__) || defined(__sparc_v8plus__)
        c = ARITH_SDIVX;
        goto gen_arith32;
#else
        tcg_out_sety(s, 0);
        c = ARITH_SDIV;
        goto gen_arith32;
#endif
    case INDEX_op_divu2_i32:
#if defined(__sparc_v9__) || defined(__sparc_v8plus__)
        c = ARITH_UDIVX;
        goto gen_arith32;
#else
        tcg_out_sety(s, 0);
        c = ARITH_UDIV;
        goto gen_arith32;
#endif

    case INDEX_op_brcond_i32:
        fprintf(stderr, "unimplemented brcond\n");
        break;

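    /* Guest memory access ops (qemu_ld / qemu_st) are not implemented yet. */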
    case INDEX_op_qemu_ld8u:
        fprintf(stderr, "unimplemented qld\n");
        break;
    case INDEX_op_qemu_ld8s:
        fprintf(stderr, "unimplemented qld\n");
        break;
    case INDEX_op_qemu_ld16u:
        fprintf(stderr, "unimplemented qld\n");
        break;
    case INDEX_op_qemu_ld16s:
        fprintf(stderr, "unimplemented qld\n");
        break;
    case INDEX_op_qemu_ld32u:
        fprintf(stderr, "unimplemented qld\n");
        break;
    case INDEX_op_qemu_ld32s:
        fprintf(stderr, "unimplemented qld\n");
        break;
    case INDEX_op_qemu_st8:
        fprintf(stderr, "unimplemented qst\n");
        break;
    case INDEX_op_qemu_st16:
        fprintf(stderr, "unimplemented qst\n");
        break;
    case INDEX_op_qemu_st32:
        fprintf(stderr, "unimplemented qst\n");
        break;

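    /* 64-bit operations, only emitted when compiling for a pure V9 host. */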
#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
    case INDEX_op_movi_i64:
        tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], STX);
        break;
    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
        goto gen_arith32;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto gen_arith32;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto gen_arith32;
    case INDEX_op_mul_i64:
        c = ARITH_MULX;
        goto gen_arith32;
    case INDEX_op_div2_i64:
        c = ARITH_SDIVX;
        goto gen_arith32;
    case INDEX_op_divu2_i64:
        c = ARITH_UDIVX;
        goto gen_arith32;

    case INDEX_op_brcond_i64:
        fprintf(stderr, "unimplemented brcond\n");
        break;
    case INDEX_op_qemu_ld64:
        fprintf(stderr, "unimplemented qld\n");
        break;
    case INDEX_op_qemu_st64:
        fprintf(stderr, "unimplemented qst\n");
        break;

#endif
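    /* Shared tail for the arithmetic and shift cases above: emit the
       immediate form when the third operand is constant, otherwise the
       register form. */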
    gen_arith32:
        if (const_args[2]) {
            tcg_out_arithi(s, args[0], args[1], args[2], c);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], c);
        }
        break;

    default:
        fprintf(stderr, "unknown opcode 0x%x\n", opc);
        tcg_abort();
    }
}

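/* Operand constraints for each supported TCG opcode: "r" is any register,
   "L" a register usable by qemu_ld/st, and "ri"/"rJ" additionally accept an
   immediate ("J" being a small 13-bit signed constant). */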
static const TCGTargetOpDef sparc_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { "r" } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    { INDEX_op_add_i32, { "r", "r", "rJ" } },
    { INDEX_op_mul_i32, { "r", "r", "rJ" } },
    { INDEX_op_div2_i32, { "r", "r", "0", "1", "r" } },
    { INDEX_op_divu2_i32, { "r", "r", "0", "1", "r" } },
    { INDEX_op_sub_i32, { "r", "r", "rJ" } },
    { INDEX_op_and_i32, { "r", "r", "rJ" } },
    { INDEX_op_or_i32, { "r", "r", "rJ" } },
    { INDEX_op_xor_i32, { "r", "r", "rJ" } },

    { INDEX_op_shl_i32, { "r", "r", "rJ" } },
    { INDEX_op_shr_i32, { "r", "r", "rJ" } },
    { INDEX_op_sar_i32, { "r", "r", "rJ" } },

    { INDEX_op_brcond_i32, { "r", "ri" } },

    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32u, { "r", "L" } },
    { INDEX_op_qemu_ld32s, { "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },

#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
    { INDEX_op_mov_i64, { "r", "r" } },
    { INDEX_op_movi_i64, { "r" } },
    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },
    { INDEX_op_st8_i64, { "r", "r" } },
    { INDEX_op_st16_i64, { "r", "r" } },
    { INDEX_op_st32_i64, { "r", "r" } },
    { INDEX_op_st_i64, { "r", "r" } },

    { INDEX_op_add_i64, { "r", "r", "rJ" } },
    { INDEX_op_mul_i64, { "r", "r", "rJ" } },
    { INDEX_op_div2_i64, { "r", "r", "0", "1", "r" } },
    { INDEX_op_divu2_i64, { "r", "r", "0", "1", "r" } },
    { INDEX_op_sub_i64, { "r", "r", "rJ" } },
    { INDEX_op_and_i64, { "r", "r", "rJ" } },
    { INDEX_op_or_i64, { "r", "r", "rJ" } },
    { INDEX_op_xor_i64, { "r", "r", "rJ" } },

    { INDEX_op_shl_i64, { "r", "r", "rJ" } },
    { INDEX_op_shr_i64, { "r", "r", "rJ" } },
    { INDEX_op_sar_i64, { "r", "r", "rJ" } },

    { INDEX_op_brcond_i64, { "r", "ri" } },
#endif
    { -1 },
};

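/* Declare the available register file, the call-clobbered registers
   (%o0-%o7) and the reserved registers to the register allocator, then
   register the constraint table. */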
void tcg_target_init(TCGContext *s)
{
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);
#endif
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_O0) |
                     (1 << TCG_REG_O1) |
                     (1 << TCG_REG_O2) |
                     (1 << TCG_REG_O3) |
                     (1 << TCG_REG_O4) |
                     (1 << TCG_REG_O5) |
                     (1 << TCG_REG_O6) |
                     (1 << TCG_REG_O7));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I5); // for internal use
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O7);
    tcg_add_target_add_op_defs(sparc_op_defs);
}