/* tcg/ppc64/tcg-target.c @ 5e0f40cf */
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
25 |
#define TCG_CT_CONST_S16 0x100 |
26 |
#define TCG_CT_CONST_U16 0x200 |
27 |
#define TCG_CT_CONST_S32 0x400 |
28 |
#define TCG_CT_CONST_U32 0x800 |
29 |
#define TCG_CT_CONST_ZERO 0x1000 |
30 |
#define TCG_CT_CONST_MONE 0x2000 |
31 |
|
32 |
static uint8_t *tb_ret_addr;
|
33 |
|
34 |
#define FAST_PATH
|
35 |
|
36 |
#if TARGET_LONG_BITS == 32 |
37 |
#define LD_ADDR LWZU
|
38 |
#define CMP_L 0 |
39 |
#else
|
40 |
#define LD_ADDR LDU
|
41 |
#define CMP_L (1<<21) |
42 |
#endif
|
43 |
|
44 |
#ifndef GUEST_BASE
|
45 |
#define GUEST_BASE 0 |
46 |
#endif
|
47 |
|
48 |
#ifdef CONFIG_GETAUXVAL
|
49 |
#include <sys/auxv.h> |
50 |
static bool have_isa_2_06; |
51 |
#define HAVE_ISA_2_06 have_isa_2_06
|
52 |
#define HAVE_ISEL have_isa_2_06
|
53 |
#else
|
54 |
#define HAVE_ISA_2_06 0 |
55 |
#define HAVE_ISEL 0 |
56 |
#endif
|
57 |
|
58 |
#ifdef CONFIG_USE_GUEST_BASE
|
59 |
#define TCG_GUEST_BASE_REG 30 |
60 |
#else
|
61 |
#define TCG_GUEST_BASE_REG 0 |
62 |
#endif
|
63 |
|
64 |
#ifndef NDEBUG
|
65 |
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { |
66 |
"r0",
|
67 |
"r1",
|
68 |
"r2",
|
69 |
"r3",
|
70 |
"r4",
|
71 |
"r5",
|
72 |
"r6",
|
73 |
"r7",
|
74 |
"r8",
|
75 |
"r9",
|
76 |
"r10",
|
77 |
"r11",
|
78 |
"r12",
|
79 |
"r13",
|
80 |
"r14",
|
81 |
"r15",
|
82 |
"r16",
|
83 |
"r17",
|
84 |
"r18",
|
85 |
"r19",
|
86 |
"r20",
|
87 |
"r21",
|
88 |
"r22",
|
89 |
"r23",
|
90 |
"r24",
|
91 |
"r25",
|
92 |
"r26",
|
93 |
"r27",
|
94 |
"r28",
|
95 |
"r29",
|
96 |
"r30",
|
97 |
"r31"
|
98 |
}; |
99 |
#endif
|
100 |
|
101 |
static const int tcg_target_reg_alloc_order[] = { |
102 |
TCG_REG_R14, |
103 |
TCG_REG_R15, |
104 |
TCG_REG_R16, |
105 |
TCG_REG_R17, |
106 |
TCG_REG_R18, |
107 |
TCG_REG_R19, |
108 |
TCG_REG_R20, |
109 |
TCG_REG_R21, |
110 |
TCG_REG_R22, |
111 |
TCG_REG_R23, |
112 |
TCG_REG_R28, |
113 |
TCG_REG_R29, |
114 |
TCG_REG_R30, |
115 |
TCG_REG_R31, |
116 |
#ifdef __APPLE__
|
117 |
TCG_REG_R2, |
118 |
#endif
|
119 |
TCG_REG_R3, |
120 |
TCG_REG_R4, |
121 |
TCG_REG_R5, |
122 |
TCG_REG_R6, |
123 |
TCG_REG_R7, |
124 |
TCG_REG_R8, |
125 |
TCG_REG_R9, |
126 |
TCG_REG_R10, |
127 |
#ifndef __APPLE__
|
128 |
TCG_REG_R11, |
129 |
#endif
|
130 |
TCG_REG_R12, |
131 |
TCG_REG_R24, |
132 |
TCG_REG_R25, |
133 |
TCG_REG_R26, |
134 |
TCG_REG_R27 |
135 |
}; |
136 |
|
137 |
static const int tcg_target_call_iarg_regs[] = { |
138 |
TCG_REG_R3, |
139 |
TCG_REG_R4, |
140 |
TCG_REG_R5, |
141 |
TCG_REG_R6, |
142 |
TCG_REG_R7, |
143 |
TCG_REG_R8, |
144 |
TCG_REG_R9, |
145 |
TCG_REG_R10 |
146 |
}; |
147 |
|
148 |
static const int tcg_target_call_oarg_regs[] = { |
149 |
TCG_REG_R3 |
150 |
}; |
151 |
|
152 |
static const int tcg_target_callee_save_regs[] = { |
153 |
#ifdef __APPLE__
|
154 |
TCG_REG_R11, |
155 |
#endif
|
156 |
TCG_REG_R14, |
157 |
TCG_REG_R15, |
158 |
TCG_REG_R16, |
159 |
TCG_REG_R17, |
160 |
TCG_REG_R18, |
161 |
TCG_REG_R19, |
162 |
TCG_REG_R20, |
163 |
TCG_REG_R21, |
164 |
TCG_REG_R22, |
165 |
TCG_REG_R23, |
166 |
TCG_REG_R24, |
167 |
TCG_REG_R25, |
168 |
TCG_REG_R26, |
169 |
TCG_REG_R27, /* currently used for the global env */
|
170 |
TCG_REG_R28, |
171 |
TCG_REG_R29, |
172 |
TCG_REG_R30, |
173 |
TCG_REG_R31 |
174 |
}; |
175 |
|
176 |
/* Compute the 24-bit branch displacement field (word-aligned, 26-bit
   byte range) for an I-form branch at PC targeting TARGET.
   Aborts code generation if the displacement does not fit. */
static uint32_t reloc_pc24_val(void *pc, tcg_target_long target)
{
    tcg_target_long disp = target - (tcg_target_long)pc;

    /* Sign-extend from 26 bits and verify nothing was lost. */
    if ((disp << 38) >> 38 != disp) {
        tcg_abort();
    }
    return disp & 0x3fffffc;
}
187 |
|
188 |
/* Patch the 24-bit displacement field of the branch instruction at PC. */
static void reloc_pc24(void *pc, tcg_target_long target)
{
    uint32_t *insn = pc;

    *insn = (*insn & ~0x3fffffc) | reloc_pc24_val(pc, target);
}
193 |
|
194 |
/* Compute the 14-bit conditional-branch displacement field (word-aligned,
   16-bit byte range) for a B-form branch at PC targeting TARGET.
   Aborts code generation if the displacement does not fit. */
static uint16_t reloc_pc14_val(void *pc, tcg_target_long target)
{
    tcg_target_long disp = target - (tcg_target_long)pc;

    if (disp != (int16_t)disp) {
        tcg_abort();
    }
    return disp & 0xfffc;
}
205 |
|
206 |
/* Patch the 14-bit displacement field of the conditional branch at PC. */
static void reloc_pc14(void *pc, tcg_target_long target)
{
    uint32_t *insn = pc;

    *insn = (*insn & ~0xfffc) | reloc_pc14_val(pc, target);
}
210 |
|
211 |
/* Resolve a relocation previously recorded against CODE_PTR.
   Only the two PPC branch relocation types are ever emitted. */
static void patch_reloc(uint8_t *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    value += addend;

    switch (type) {
    case R_PPC_REL14:
        reloc_pc14(code_ptr, value);
        break;
    case R_PPC_REL24:
        reloc_pc24(code_ptr, value);
        break;
    default:
        tcg_abort();
    }
}
226 |
|
227 |
/* parse target specific constraints */
|
228 |
/* Parse one target-specific operand-constraint letter, advancing *PCT_STR.
   Returns 0 on success, -1 for an unrecognized letter. */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str = *pct_str;

    switch (ct_str[0]) {
    case 'A': case 'B': case 'C': case 'D':
        /* A specific argument register: r3 + (letter - 'A'). */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, 3 + ct_str[0] - 'A');
        break;
    case 'r':
        /* Any general register. */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        break;
    case 'L':                   /* qemu_ld constraint */
        /* Avoid registers clobbered by the softmmu slow path. */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
#ifdef CONFIG_SOFTMMU
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
#endif
        break;
    case 'S':                   /* qemu_st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
#ifdef CONFIG_SOFTMMU
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6);
#endif
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S16;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_U16;
        break;
    case 'M':
        ct->ct |= TCG_CT_CONST_MONE;
        break;
    case 'T':
        ct->ct |= TCG_CT_CONST_S32;
        break;
    case 'U':
        ct->ct |= TCG_CT_CONST_U32;
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return -1;
    }
    *pct_str = ct_str + 1;
    return 0;
}
286 |
|
287 |
/* test if a constant matches the constraint */
|
288 |
static int tcg_target_const_match(tcg_target_long val, |
289 |
const TCGArgConstraint *arg_ct)
|
290 |
{ |
291 |
int ct = arg_ct->ct;
|
292 |
if (ct & TCG_CT_CONST) {
|
293 |
return 1; |
294 |
} else if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) { |
295 |
return 1; |
296 |
} else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) { |
297 |
return 1; |
298 |
} else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) { |
299 |
return 1; |
300 |
} else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) { |
301 |
return 1; |
302 |
} else if ((ct & TCG_CT_CONST_ZERO) && val == 0) { |
303 |
return 1; |
304 |
} else if ((ct & TCG_CT_CONST_MONE) && val == -1) { |
305 |
return 1; |
306 |
} |
307 |
return 0; |
308 |
} |
309 |
|
310 |
#define OPCD(opc) ((opc)<<26) |
311 |
#define XO19(opc) (OPCD(19)|((opc)<<1)) |
312 |
#define MD30(opc) (OPCD(30)|((opc)<<2)) |
313 |
#define MDS30(opc) (OPCD(30)|((opc)<<1)) |
314 |
#define XO31(opc) (OPCD(31)|((opc)<<1)) |
315 |
#define XO58(opc) (OPCD(58)|(opc)) |
316 |
#define XO62(opc) (OPCD(62)|(opc)) |
317 |
|
318 |
#define B OPCD( 18) |
319 |
#define BC OPCD( 16) |
320 |
#define LBZ OPCD( 34) |
321 |
#define LHZ OPCD( 40) |
322 |
#define LHA OPCD( 42) |
323 |
#define LWZ OPCD( 32) |
324 |
#define STB OPCD( 38) |
325 |
#define STH OPCD( 44) |
326 |
#define STW OPCD( 36) |
327 |
|
328 |
#define STD XO62( 0) |
329 |
#define STDU XO62( 1) |
330 |
#define STDX XO31(149) |
331 |
|
332 |
#define LD XO58( 0) |
333 |
#define LDX XO31( 21) |
334 |
#define LDU XO58( 1) |
335 |
#define LWA XO58( 2) |
336 |
#define LWAX XO31(341) |
337 |
|
338 |
#define ADDIC OPCD( 12) |
339 |
#define ADDI OPCD( 14) |
340 |
#define ADDIS OPCD( 15) |
341 |
#define ORI OPCD( 24) |
342 |
#define ORIS OPCD( 25) |
343 |
#define XORI OPCD( 26) |
344 |
#define XORIS OPCD( 27) |
345 |
#define ANDI OPCD( 28) |
346 |
#define ANDIS OPCD( 29) |
347 |
#define MULLI OPCD( 7) |
348 |
#define CMPLI OPCD( 10) |
349 |
#define CMPI OPCD( 11) |
350 |
#define SUBFIC OPCD( 8) |
351 |
|
352 |
#define LWZU OPCD( 33) |
353 |
#define STWU OPCD( 37) |
354 |
|
355 |
#define RLWIMI OPCD( 20) |
356 |
#define RLWINM OPCD( 21) |
357 |
#define RLWNM OPCD( 23) |
358 |
|
359 |
#define RLDICL MD30( 0) |
360 |
#define RLDICR MD30( 1) |
361 |
#define RLDIMI MD30( 3) |
362 |
#define RLDCL MDS30( 8) |
363 |
|
364 |
#define BCLR XO19( 16) |
365 |
#define BCCTR XO19(528) |
366 |
#define CRAND XO19(257) |
367 |
#define CRANDC XO19(129) |
368 |
#define CRNAND XO19(225) |
369 |
#define CROR XO19(449) |
370 |
#define CRNOR XO19( 33) |
371 |
|
372 |
#define EXTSB XO31(954) |
373 |
#define EXTSH XO31(922) |
374 |
#define EXTSW XO31(986) |
375 |
#define ADD XO31(266) |
376 |
#define ADDE XO31(138) |
377 |
#define ADDME XO31(234) |
378 |
#define ADDZE XO31(202) |
379 |
#define ADDC XO31( 10) |
380 |
#define AND XO31( 28) |
381 |
#define SUBF XO31( 40) |
382 |
#define SUBFC XO31( 8) |
383 |
#define SUBFE XO31(136) |
384 |
#define SUBFME XO31(232) |
385 |
#define SUBFZE XO31(200) |
386 |
#define OR XO31(444) |
387 |
#define XOR XO31(316) |
388 |
#define MULLW XO31(235) |
389 |
#define MULHWU XO31( 11) |
390 |
#define DIVW XO31(491) |
391 |
#define DIVWU XO31(459) |
392 |
#define CMP XO31( 0) |
393 |
#define CMPL XO31( 32) |
394 |
#define LHBRX XO31(790) |
395 |
#define LWBRX XO31(534) |
396 |
#define LDBRX XO31(532) |
397 |
#define STHBRX XO31(918) |
398 |
#define STWBRX XO31(662) |
399 |
#define STDBRX XO31(660) |
400 |
#define MFSPR XO31(339) |
401 |
#define MTSPR XO31(467) |
402 |
#define SRAWI XO31(824) |
403 |
#define NEG XO31(104) |
404 |
#define MFCR XO31( 19) |
405 |
#define MFOCRF (MFCR | (1u << 20)) |
406 |
#define NOR XO31(124) |
407 |
#define CNTLZW XO31( 26) |
408 |
#define CNTLZD XO31( 58) |
409 |
#define ANDC XO31( 60) |
410 |
#define ORC XO31(412) |
411 |
#define EQV XO31(284) |
412 |
#define NAND XO31(476) |
413 |
#define ISEL XO31( 15) |
414 |
|
415 |
#define MULLD XO31(233) |
416 |
#define MULHD XO31( 73) |
417 |
#define MULHDU XO31( 9) |
418 |
#define DIVD XO31(489) |
419 |
#define DIVDU XO31(457) |
420 |
|
421 |
#define LBZX XO31( 87) |
422 |
#define LHZX XO31(279) |
423 |
#define LHAX XO31(343) |
424 |
#define LWZX XO31( 23) |
425 |
#define STBX XO31(215) |
426 |
#define STHX XO31(407) |
427 |
#define STWX XO31(151) |
428 |
|
429 |
#define SPR(a, b) ((((a)<<5)|(b))<<11) |
430 |
#define LR SPR(8, 0) |
431 |
#define CTR SPR(9, 0) |
432 |
|
433 |
#define SLW XO31( 24) |
434 |
#define SRW XO31(536) |
435 |
#define SRAW XO31(792) |
436 |
|
437 |
#define SLD XO31( 27) |
438 |
#define SRD XO31(539) |
439 |
#define SRAD XO31(794) |
440 |
#define SRADI XO31(413<<1) |
441 |
|
442 |
#define TW XO31( 4) |
443 |
#define TRAP (TW | TO(31)) |
444 |
|
445 |
#define RT(r) ((r)<<21) |
446 |
#define RS(r) ((r)<<21) |
447 |
#define RA(r) ((r)<<16) |
448 |
#define RB(r) ((r)<<11) |
449 |
#define TO(t) ((t)<<21) |
450 |
#define SH(s) ((s)<<11) |
451 |
#define MB(b) ((b)<<6) |
452 |
#define ME(e) ((e)<<1) |
453 |
#define BO(o) ((o)<<21) |
454 |
#define MB64(b) ((b)<<5) |
455 |
#define FXM(b) (1 << (19 - (b))) |
456 |
|
457 |
#define LK 1 |
458 |
|
459 |
#define TAB(t, a, b) (RT(t) | RA(a) | RB(b))
|
460 |
#define SAB(s, a, b) (RS(s) | RA(a) | RB(b))
|
461 |
#define TAI(s, a, i) (RT(s) | RA(a) | ((i) & 0xffff)) |
462 |
#define SAI(s, a, i) (RS(s) | RA(a) | ((i) & 0xffff)) |
463 |
|
464 |
#define BF(n) ((n)<<23) |
465 |
#define BI(n, c) (((c)+((n)*4))<<16) |
466 |
#define BT(n, c) (((c)+((n)*4))<<21) |
467 |
#define BA(n, c) (((c)+((n)*4))<<16) |
468 |
#define BB(n, c) (((c)+((n)*4))<<11) |
469 |
#define BC_(n, c) (((c)+((n)*4))<<6) |
470 |
|
471 |
#define BO_COND_TRUE BO(12) |
472 |
#define BO_COND_FALSE BO( 4) |
473 |
#define BO_ALWAYS BO(20) |
474 |
|
475 |
enum {
|
476 |
CR_LT, |
477 |
CR_GT, |
478 |
CR_EQ, |
479 |
CR_SO |
480 |
}; |
481 |
|
482 |
static const uint32_t tcg_to_bc[] = { |
483 |
[TCG_COND_EQ] = BC | BI(7, CR_EQ) | BO_COND_TRUE,
|
484 |
[TCG_COND_NE] = BC | BI(7, CR_EQ) | BO_COND_FALSE,
|
485 |
[TCG_COND_LT] = BC | BI(7, CR_LT) | BO_COND_TRUE,
|
486 |
[TCG_COND_GE] = BC | BI(7, CR_LT) | BO_COND_FALSE,
|
487 |
[TCG_COND_LE] = BC | BI(7, CR_GT) | BO_COND_FALSE,
|
488 |
[TCG_COND_GT] = BC | BI(7, CR_GT) | BO_COND_TRUE,
|
489 |
[TCG_COND_LTU] = BC | BI(7, CR_LT) | BO_COND_TRUE,
|
490 |
[TCG_COND_GEU] = BC | BI(7, CR_LT) | BO_COND_FALSE,
|
491 |
[TCG_COND_LEU] = BC | BI(7, CR_GT) | BO_COND_FALSE,
|
492 |
[TCG_COND_GTU] = BC | BI(7, CR_GT) | BO_COND_TRUE,
|
493 |
}; |
494 |
|
495 |
/* The low bit here is set if the RA and RB fields must be inverted. */
|
496 |
static const uint32_t tcg_to_isel[] = { |
497 |
[TCG_COND_EQ] = ISEL | BC_(7, CR_EQ),
|
498 |
[TCG_COND_NE] = ISEL | BC_(7, CR_EQ) | 1, |
499 |
[TCG_COND_LT] = ISEL | BC_(7, CR_LT),
|
500 |
[TCG_COND_GE] = ISEL | BC_(7, CR_LT) | 1, |
501 |
[TCG_COND_LE] = ISEL | BC_(7, CR_GT) | 1, |
502 |
[TCG_COND_GT] = ISEL | BC_(7, CR_GT),
|
503 |
[TCG_COND_LTU] = ISEL | BC_(7, CR_LT),
|
504 |
[TCG_COND_GEU] = ISEL | BC_(7, CR_LT) | 1, |
505 |
[TCG_COND_LEU] = ISEL | BC_(7, CR_GT) | 1, |
506 |
[TCG_COND_GTU] = ISEL | BC_(7, CR_GT),
|
507 |
}; |
508 |
|
509 |
/* Register-to-register move, emitted as OR ret,arg,arg (the "mr" idiom). */
static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    tcg_out32(s, OR | SAB(arg, ret, arg));
}
514 |
|
515 |
/* Emit an MD-form rotate-left-doubleword-immediate (RLDICL/RLDICR/RLDIMI).
   The 6-bit SH and MB values are split across the instruction: SH bit 5
   lands in instruction bit 1, and MB is stored rotated left by one. */
static inline void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs,
                               int sh, int mb)
{
    int sh_field = SH(sh & 0x1f) | (((sh >> 5) & 1) << 1);
    int mb_field = MB64((mb >> 5) | ((mb << 1) & 0x3f));

    tcg_out32(s, op | RA(ra) | RS(rs) | sh_field | mb_field);
}
522 |
|
523 |
/* Emit an M-form rotate-left-word instruction (RLWINM/RLWIMI/RLWNM)
   with shift SH and mask running from bit MB through bit ME. */
static inline void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
                               int sh, int mb, int me)
{
    tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me));
}
528 |
|
529 |
/* Zero-extend the low 32 bits of SRC into DST (clear bits 32..63). */
static inline void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
{
    tcg_out_rld(s, RLDICL, dst, src, 0, 32);
}
533 |
|
534 |
/* 64-bit shift left immediate by C, via rotate + clear-right mask. */
static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rld(s, RLDICR, dst, src, c, 63 - c);
}
538 |
|
539 |
/* 64-bit logical shift right immediate by C, via rotate + clear-left mask. */
static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rld(s, RLDICL, dst, src, 64 - c, c);
}
543 |
|
544 |
/* Load a 32-bit constant into RET, in at most two instructions. */
static void tcg_out_movi32(TCGContext *s, TCGReg ret, int32_t arg)
{
    if (arg == (int16_t)arg) {
        /* Fits the sign-extended 16-bit ADDI immediate. */
        tcg_out32(s, ADDI | TAI(ret, 0, arg));
        return;
    }
    /* High half first, then OR in the low half if non-zero. */
    tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
    if (arg & 0xffff) {
        tcg_out32(s, ORI | SAI(ret, ret, arg));
    }
}
555 |
|
556 |
static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret, |
557 |
tcg_target_long arg) |
558 |
{ |
559 |
if (type == TCG_TYPE_I32 || arg == (int32_t)arg) {
|
560 |
tcg_out_movi32(s, ret, arg); |
561 |
} else if (arg == (uint32_t)arg && !(arg & 0x8000)) { |
562 |
tcg_out32(s, ADDI | TAI(ret, 0, arg));
|
563 |
tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
|
564 |
} else {
|
565 |
int32_t high = arg >> 32;
|
566 |
tcg_out_movi32(s, ret, high); |
567 |
if (high) {
|
568 |
tcg_out_shli64(s, ret, ret, 32);
|
569 |
} |
570 |
if (arg & 0xffff0000) { |
571 |
tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
|
572 |
} |
573 |
if (arg & 0xffff) { |
574 |
tcg_out32(s, ORI | SAI(ret, ret, arg)); |
575 |
} |
576 |
} |
577 |
} |
578 |
|
579 |
/* Recognize a 32-bit value usable as an RLWINM wrap-around mask.
   Accept a bit pattern like:
       0....01....1
       1....10....0
       0..01..10..0
   i.e. at most one 0->1 and one 1->0 transition.  On success store the
   mask-begin/mask-end bit numbers in *MB and *ME. */
static bool mask_operand(uint32_t c, int *mb, int *me)
{
    uint32_t low_bit, rest;

    if (c == 0 || c == -1) {
        return false;
    }

    /* Adding the lowest set bit collapses the low run of ones; what
       remains must have at most a single bit set. */
    rest = c;
    low_bit = rest & -rest;
    rest += low_bit;
    if (rest & (rest - 1)) {
        return false;
    }

    *me = clz32(low_bit);
    *mb = rest ? clz32(rest & -rest) + 1 : 0;
    return true;
}
602 |
|
603 |
/* Recognize a 64-bit value usable as an RLDICL/RLDICR mask: a single
   contiguous run of ones anchored at one end of the register.
   On success store the mask-begin/mask-end bit numbers in *MB and *ME. */
static bool mask64_operand(uint64_t c, int *mb, int *me)
{
    uint64_t low_bit;

    if (c == 0) {
        return false;
    }
    low_bit = c & -c;

    /* Accept 1..10..0: ones anchored at the top. */
    if (c == -low_bit) {
        *mb = 0;
        *me = clz64(low_bit);
        return true;
    }
    /* Accept 0..01..1: ones anchored at the bottom. */
    if (low_bit == 1 && (c & (c + 1)) == 0) {
        *mb = clz64(c + 1) + 1;
        *me = 63;
        return true;
    }
    return false;
}
626 |
|
627 |
/* Emit DST = SRC & C for a 32-bit constant C, preferring a single
   immediate or mask instruction; falls back on a constant load + AND
   (clobbers R0). */
static void tcg_out_andi32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    int mb, me;

    if ((c & 0xffff) == c) {
        tcg_out32(s, ANDI | SAI(src, dst, c));
    } else if ((c & 0xffff0000) == c) {
        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
    } else if (mask_operand(c, &mb, &me)) {
        tcg_out_rlw(s, RLWINM, dst, src, 0, mb, me);
    } else {
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R0, c);
        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
    }
}
644 |
|
645 |
/* Emit DST = SRC & C for a 64-bit constant C, preferring a single
   immediate or mask instruction; falls back on a constant load + AND
   (clobbers R0). */
static void tcg_out_andi64(TCGContext *s, TCGReg dst, TCGReg src, uint64_t c)
{
    int mb, me;

    if ((c & 0xffff) == c) {
        tcg_out32(s, ANDI | SAI(src, dst, c));
    } else if ((c & 0xffff0000) == c) {
        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
    } else if (mask64_operand(c, &mb, &me)) {
        if (mb == 0) {
            /* Top-anchored run of ones: clear to the right of ME. */
            tcg_out_rld(s, RLDICR, dst, src, 0, me);
        } else {
            /* Bottom-anchored run of ones: clear to the left of MB. */
            tcg_out_rld(s, RLDICL, dst, src, 0, mb);
        }
    } else {
        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, c);
        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
    }
}
666 |
|
667 |
/* Shared helper for 32-bit OR/XOR with a constant: emit the high-half
   (OP_HI) and/or low-half (OP_LO) immediate forms as needed. */
static void tcg_out_zori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c,
                           int op_lo, int op_hi)
{
    if (c >> 16) {
        tcg_out32(s, op_hi | SAI(src, dst, c >> 16));
        src = dst;              /* subsequent op reads the partial result */
    }
    if (c & 0xffff) {
        tcg_out32(s, op_lo | SAI(src, dst, c));
        src = dst;
    }
}
679 |
|
680 |
/* Emit DST = SRC | C for a 32-bit constant C. */
static void tcg_out_ori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    tcg_out_zori32(s, dst, src, c, ORI, ORIS);
}
684 |
|
685 |
/* Emit DST = SRC ^ C for a 32-bit constant C. */
static void tcg_out_xori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    tcg_out_zori32(s, dst, src, c, XORI, XORIS);
}
689 |
|
690 |
/* Emit an unconditional branch to TARGET, with MASK carrying extra
   instruction bits (e.g. LK).  Uses a direct branch when the 26-bit
   displacement reaches; otherwise loads the address and branches via
   CTR (clobbers R0). */
static void tcg_out_b(TCGContext *s, int mask, tcg_target_long target)
{
    tcg_target_long disp = target - (tcg_target_long)s->code_ptr;

    if ((disp << 38) >> 38 == disp) {
        tcg_out32(s, B | (disp & 0x3fffffc) | mask);
    } else {
        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, (tcg_target_long)target);
        tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
        tcg_out32(s, BCCTR | BO_ALWAYS | mask);
    }
}
703 |
|
704 |
/* Emit a call to ARG.  When CONST_ARG is set, ARG is an immediate
   address; otherwise it is the register holding the target.
   On the ELF ABI, ARG points at a function descriptor
   {entry, TOC, env}: load all three fields and branch via CTR.
   Clobbers R0, R2, R11 (ELF) per the ABI. */
static void tcg_out_call(TCGContext *s, tcg_target_long arg, int const_arg)
{
#ifdef __APPLE__
    if (const_arg) {
        tcg_out_b(s, LK, arg);
    } else {
        tcg_out32(s, MTSPR | RS(arg) | LR);
        tcg_out32(s, BCLR | BO_ALWAYS | LK);
    }
#else
    int reg = arg;

    if (const_arg) {
        reg = TCG_REG_R2;
        tcg_out_movi(s, TCG_TYPE_I64, reg, arg);
    }

    tcg_out32(s, LD | TAI(TCG_REG_R0, reg, 0));
    /* Fix: MTSPR takes its source register in the RS field (as every
       other MTSPR in this file); the previous RA() placed it in the
       wrong field, harmless only because TCG_REG_R0 == 0. */
    tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
    tcg_out32(s, LD | TAI(TCG_REG_R11, reg, 16));   /* environment ptr */
    tcg_out32(s, LD | TAI(TCG_REG_R2, reg, 8));     /* callee's TOC */
    tcg_out32(s, BCCTR | BO_ALWAYS | LK);
#endif
}
728 |
|
729 |
/* Emit a load/store with displacement OFFSET: D-form OP1 when the
   offset fits in 16 signed bits, else X-form OP2 with the offset
   materialized in R0. */
static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
                         int offset, int op1, int op2)
{
    if (offset == (int16_t)offset) {
        tcg_out32(s, op1 | TAI(ret, addr, offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, offset);
        tcg_out32(s, op2 | TAB(ret, addr, TCG_REG_R0));
    }
}
739 |
|
740 |
/* As tcg_out_ldst, but for DS-form instructions (LD/STD) whose
   displacement field requires the low two bits to be zero. */
static void tcg_out_ldsta(TCGContext *s, TCGReg ret, TCGReg addr,
                          int offset, int op1, int op2)
{
    if (offset == (int16_t)(offset & ~3)) {
        tcg_out32(s, op1 | TAI(ret, addr, offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, offset);
        tcg_out32(s, op2 | TAB(ret, addr, TCG_REG_R0));
    }
}
|
751 |
#if defined(CONFIG_SOFTMMU)
|
752 |
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
|
753 |
int mmu_idx) */
|
754 |
static const void * const qemu_ld_helpers[4] = { |
755 |
helper_ldb_mmu, |
756 |
helper_ldw_mmu, |
757 |
helper_ldl_mmu, |
758 |
helper_ldq_mmu, |
759 |
}; |
760 |
|
761 |
/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
|
762 |
uintxx_t val, int mmu_idx) */
|
763 |
static const void * const qemu_st_helpers[4] = { |
764 |
helper_stb_mmu, |
765 |
helper_stw_mmu, |
766 |
helper_stl_mmu, |
767 |
helper_stq_mmu, |
768 |
}; |
769 |
|
770 |
/* Emit the softmmu TLB lookup.  On return:
     R0 = &env->tlb_table[mem_index][index].<field at OFFSET>
          (and R0 was also used to form that address),
     R1 = the tag loaded from that TLB entry,
     R2 = the page-aligned guest address to compare against R1.
   S_BITS is log2 of the access size (for alignment masking). */
static void tcg_out_tlb_read(TCGContext *s, TCGReg r0, TCGReg r1, TCGReg r2,
                             TCGReg addr_reg, int s_bits, int offset)
{
#if TARGET_LONG_BITS == 32
    tcg_out_ext32u(s, addr_reg, addr_reg);

    /* Extract the TLB index, pre-scaled by the entry size. */
    tcg_out_rlw(s, RLWINM, r0, addr_reg,
                32 - (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
                32 - (CPU_TLB_BITS + CPU_TLB_ENTRY_BITS),
                31 - CPU_TLB_ENTRY_BITS);
    tcg_out32(s, ADD | TAB(r0, r0, TCG_AREG0));
    tcg_out32(s, LWZU | TAI(r1, r0, offset));
    /* Page-align the address, dropping low bits covered by alignment. */
    tcg_out_rlw(s, RLWINM, r2, addr_reg, 0,
                (32 - s_bits) & 31, 31 - TARGET_PAGE_BITS);
#else
    tcg_out_rld(s, RLDICL, r0, addr_reg,
                64 - TARGET_PAGE_BITS,
                64 - CPU_TLB_BITS);
    tcg_out_shli64(s, r0, r0, CPU_TLB_ENTRY_BITS);

    tcg_out32(s, ADD | TAB(r0, r0, TCG_AREG0));
    tcg_out32(s, LD_ADDR | TAI(r1, r0, offset));

    if (!s_bits) {
        /* Byte access: simply clear the sub-page bits. */
        tcg_out_rld(s, RLDICR, r2, addr_reg, 0, 63 - TARGET_PAGE_BITS);
    } else {
        /* Wider access: also force the low S_BITS alignment bits into
           the comparison so unaligned accesses miss. */
        tcg_out_rld(s, RLDICL, r2, addr_reg,
                    64 - TARGET_PAGE_BITS,
                    TARGET_PAGE_BITS - s_bits);
        tcg_out_rld(s, RLDICL, r2, r2, TARGET_PAGE_BITS, 0);
    }
#endif
}
803 |
#endif
|
804 |
|
805 |
static const uint32_t qemu_ldx_opc[8] = { |
806 |
#ifdef TARGET_WORDS_BIGENDIAN
|
807 |
LBZX, LHZX, LWZX, LDX, |
808 |
0, LHAX, LWAX, LDX
|
809 |
#else
|
810 |
LBZX, LHBRX, LWBRX, LDBRX, |
811 |
0, 0, 0, LDBRX, |
812 |
#endif
|
813 |
}; |
814 |
|
815 |
static const uint32_t qemu_stx_opc[4] = { |
816 |
#ifdef TARGET_WORDS_BIGENDIAN
|
817 |
STBX, STHX, STWX, STDX |
818 |
#else
|
819 |
STBX, STHBRX, STWBRX, STDBRX, |
820 |
#endif
|
821 |
}; |
822 |
|
823 |
static const uint32_t qemu_exts_opc[4] = { |
824 |
EXTSB, EXTSH, EXTSW, 0
|
825 |
}; |
826 |
|
827 |
/* Emit a guest memory load.  OPC bits 0-1 encode log2 of the size,
   bit 2 requests sign extension. */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
{
    TCGReg addr_reg, data_reg, r0, r1, rbase;
    uint32_t insn, s_bits;
#ifdef CONFIG_SOFTMMU
    TCGReg r2, ir;
    int mem_index;
    void *label1_ptr, *label2_ptr;
#endif

    data_reg = *args++;
    addr_reg = *args++;
    s_bits = opc & 3;

#ifdef CONFIG_SOFTMMU
    mem_index = *args;

    r0 = TCG_REG_R3;
    r1 = TCG_REG_R4;
    r2 = TCG_REG_R0;
    rbase = 0;

    tcg_out_tlb_read(s, r0, r1, r2, addr_reg, s_bits,
                     offsetof(CPUArchState, tlb_table[mem_index][0].addr_read));

    tcg_out32(s, CMP | BF(7) | RA(r2) | RB(r1) | CMP_L);

    label1_ptr = s->code_ptr;
#ifdef FAST_PATH
    tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_TRUE);
#endif

    /* Slow path: call the load helper. */
    ir = TCG_REG_R3;
    tcg_out_mov(s, TCG_TYPE_I64, ir++, TCG_AREG0);
    tcg_out_mov(s, TCG_TYPE_I64, ir++, addr_reg);
    tcg_out_movi(s, TCG_TYPE_I64, ir++, mem_index);

    tcg_out_call(s, (tcg_target_long)qemu_ld_helpers[s_bits], 1);

    if (opc & 4) {
        /* Sign-extend the helper's zero-extended return value. */
        insn = qemu_exts_opc[s_bits];
        tcg_out32(s, insn | RA(data_reg) | RS(TCG_REG_R3));
    } else if (data_reg != TCG_REG_R3) {
        tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R3);
    }
    label2_ptr = s->code_ptr;
    tcg_out32(s, B);

    /* label1: fast path */
#ifdef FAST_PATH
    reloc_pc14(label1_ptr, (tcg_target_long)s->code_ptr);
#endif

    /* r0 now contains &env->tlb_table[mem_index][index].addr_read */
    tcg_out32(s, LD | TAI(r0, r0,
                          offsetof(CPUTLBEntry, addend)
                          - offsetof(CPUTLBEntry, addr_read)));
    /* r0 = env->tlb_table[mem_index][index].addend */
    tcg_out32(s, ADD | TAB(r0, r0, addr_reg));
    /* r0 = env->tlb_table[mem_index][index].addend + addr */

#else  /* !CONFIG_SOFTMMU */
#if TARGET_LONG_BITS == 32
    tcg_out_ext32u(s, addr_reg, addr_reg);
#endif
    r0 = addr_reg;
    r1 = TCG_REG_R3;
    rbase = GUEST_BASE ? TCG_GUEST_BASE_REG : 0;
#endif

    insn = qemu_ldx_opc[opc];
    if (!HAVE_ISA_2_06 && insn == LDBRX) {
        /* No LDBRX before ISA 2.06: compose from two LWBRX. */
        tcg_out32(s, ADDI | TAI(r1, r0, 4));
        tcg_out32(s, LWBRX | TAB(data_reg, rbase, r0));
        tcg_out32(s, LWBRX | TAB(r1, rbase, r1));
        tcg_out_rld(s, RLDIMI, data_reg, r1, 32, 0);
    } else if (insn) {
        tcg_out32(s, insn | TAB(data_reg, rbase, r0));
    } else {
        /* No single signed-load opcode: zero-extend load, then extend. */
        insn = qemu_ldx_opc[s_bits];
        tcg_out32(s, insn | TAB(data_reg, rbase, r0));
        insn = qemu_exts_opc[s_bits];
        tcg_out32(s, insn | RA(data_reg) | RS(data_reg));
    }

#ifdef CONFIG_SOFTMMU
    reloc_pc24(label2_ptr, (tcg_target_long)s->code_ptr);
#endif
}
917 |
|
918 |
/* Emit a guest memory store.  OPC encodes log2 of the access size. */
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
{
    TCGReg addr_reg, r0, r1, rbase, data_reg;
    uint32_t insn;
#ifdef CONFIG_SOFTMMU
    TCGReg r2, ir;
    int mem_index;
    void *label1_ptr, *label2_ptr;
#endif

    data_reg = *args++;
    addr_reg = *args++;

#ifdef CONFIG_SOFTMMU
    mem_index = *args;

    r0 = TCG_REG_R3;
    r1 = TCG_REG_R4;
    r2 = TCG_REG_R0;
    rbase = 0;

    tcg_out_tlb_read(s, r0, r1, r2, addr_reg, opc,
                     offsetof(CPUArchState, tlb_table[mem_index][0].addr_write));

    tcg_out32(s, CMP | BF(7) | RA(r2) | RB(r1) | CMP_L);

    label1_ptr = s->code_ptr;
#ifdef FAST_PATH
    tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_TRUE);
#endif

    /* Slow path: call the store helper, zero-extending the data to
       the access width. */
    ir = TCG_REG_R3;
    tcg_out_mov(s, TCG_TYPE_I64, ir++, TCG_AREG0);
    tcg_out_mov(s, TCG_TYPE_I64, ir++, addr_reg);
    tcg_out_rld(s, RLDICL, ir++, data_reg, 0, 64 - (1 << (3 + opc)));
    tcg_out_movi(s, TCG_TYPE_I64, ir++, mem_index);

    tcg_out_call(s, (tcg_target_long)qemu_st_helpers[opc], 1);

    label2_ptr = s->code_ptr;
    tcg_out32(s, B);

    /* label1: fast path */
#ifdef FAST_PATH
    reloc_pc14(label1_ptr, (tcg_target_long)s->code_ptr);
#endif

    tcg_out32(s, LD | TAI(r0, r0,
                          offsetof(CPUTLBEntry, addend)
                          - offsetof(CPUTLBEntry, addr_write)));
    /* r0 = env->tlb_table[mem_index][index].addend */
    tcg_out32(s, ADD | TAB(r0, r0, addr_reg));
    /* r0 = env->tlb_table[mem_index][index].addend + addr */

#else  /* !CONFIG_SOFTMMU */
#if TARGET_LONG_BITS == 32
    tcg_out_ext32u(s, addr_reg, addr_reg);
#endif
    r1 = TCG_REG_R3;
    r0 = addr_reg;
    rbase = GUEST_BASE ? TCG_GUEST_BASE_REG : 0;
#endif

    insn = qemu_stx_opc[opc];
    if (!HAVE_ISA_2_06 && insn == STDBRX) {
        /* No STDBRX before ISA 2.06: compose from two STWBRX. */
        tcg_out32(s, STWBRX | SAB(data_reg, rbase, r0));
        tcg_out32(s, ADDI | TAI(r1, r0, 4));
        tcg_out_shri64(s, TCG_REG_R0, data_reg, 32);
        tcg_out32(s, STWBRX | SAB(TCG_REG_R0, rbase, r1));
    } else {
        tcg_out32(s, insn | SAB(data_reg, rbase, r0));
    }

#ifdef CONFIG_SOFTMMU
    reloc_pc24(label2_ptr, (tcg_target_long)s->code_ptr);
#endif
}
996 |
|
997 |
/* Emit the translation-block prologue/epilogue pair.  The prologue
   builds a standard ELF stack frame, saves callee-saved registers,
   loads env into TCG_AREG0, and jumps to the TB; the epilogue
   (tb_ret_addr) restores and returns. */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i, frame_size;

    frame_size = 0
        + 8                     /* back chain */
        + 8                     /* CR */
        + 8                     /* LR */
        + 8                     /* compiler doubleword */
        + 8                     /* link editor doubleword */
        + 8                     /* TOC save area */
        + TCG_STATIC_CALL_ARGS_SIZE
        + ARRAY_SIZE(tcg_target_callee_save_regs) * 8
        + CPU_TEMP_BUF_NLONGS * sizeof(long);
    frame_size = (frame_size + 15) & ~15;   /* 16-byte alignment */

    tcg_set_frame(s, TCG_REG_CALL_STACK,
                  frame_size - CPU_TEMP_BUF_NLONGS * sizeof(long),
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

#ifndef __APPLE__
    /* First emit adhoc function descriptor */
    tcg_out64(s, (uint64_t)s->code_ptr + 24);   /* entry point */
    s->code_ptr += 16;          /* skip TOC and environment pointer */
#endif

    /* Prologue */
    tcg_out32(s, MFSPR | RT(TCG_REG_R0) | LR);
    tcg_out32(s, STDU | SAI(TCG_REG_R1, TCG_REG_R1, -frame_size));
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
        tcg_out32(s, STD | SAI(tcg_target_callee_save_regs[i], 1,
                               i * 8 + 48 + TCG_STATIC_CALL_ARGS_SIZE));
    }
    tcg_out32(s, STD | SAI(TCG_REG_R0, TCG_REG_R1, frame_size + 16));

#ifdef CONFIG_USE_GUEST_BASE
    if (GUEST_BASE) {
        tcg_out_movi(s, TCG_TYPE_I64, TCG_GUEST_BASE_REG, GUEST_BASE);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out32(s, MTSPR | RS(tcg_target_call_iarg_regs[1]) | CTR);
    tcg_out32(s, BCCTR | BO_ALWAYS);

    /* Epilogue */
    tb_ret_addr = s->code_ptr;

    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
        tcg_out32(s, LD | TAI(tcg_target_callee_save_regs[i], TCG_REG_R1,
                              i * 8 + 48 + TCG_STATIC_CALL_ARGS_SIZE));
    }
    tcg_out32(s, LD | TAI(TCG_REG_R0, TCG_REG_R1, frame_size + 16));
    tcg_out32(s, MTSPR | RS(TCG_REG_R0) | LR);
    tcg_out32(s, ADDI | TAI(TCG_REG_R1, TCG_REG_R1, frame_size));
    tcg_out32(s, BCLR | BO_ALWAYS);
}
1056 |
|
1057 |
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1, |
1058 |
intptr_t arg2) |
1059 |
{ |
1060 |
if (type == TCG_TYPE_I32) {
|
1061 |
tcg_out_ldst(s, ret, arg1, arg2, LWZ, LWZX); |
1062 |
} else {
|
1063 |
tcg_out_ldsta(s, ret, arg1, arg2, LD, LDX); |
1064 |
} |
1065 |
} |
1066 |
|
1067 |
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1, |
1068 |
intptr_t arg2) |
1069 |
{ |
1070 |
if (type == TCG_TYPE_I32) {
|
1071 |
tcg_out_ldst(s, arg, arg1, arg2, STW, STWX); |
1072 |
} else {
|
1073 |
tcg_out_ldsta(s, arg, arg1, arg2, STD, STDX); |
1074 |
} |
1075 |
} |
1076 |
|
1077 |
/* Emit a compare of ARG1 against ARG2 into condition-register field CR.
 *
 * Chooses between the immediate forms (CMPI/CMPLI, 16-bit signed/unsigned
 * immediates) and the register forms (CMP/CMPL) based on COND's signedness
 * and whether a constant ARG2 fits the immediate field; otherwise the
 * constant is materialized in R0.  Bit 21 of the opcode selects the
 * 64-bit ("L") form of the compare.
 */
static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
                        int const_arg2, int cr, TCGType type)
{
    int imm;                    /* nonzero: use the immediate form of OP */
    uint32_t op;

    /* Simplify the comparisons below wrt CMPI. */
    if (type == TCG_TYPE_I32) {
        arg2 = (int32_t)arg2;
    }

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* Equality may use either signed or unsigned immediates.  */
        if (const_arg2) {
            if ((int16_t) arg2 == arg2) {
                op = CMPI;
                imm = 1;
                break;
            } else if ((uint16_t) arg2 == arg2) {
                op = CMPLI;
                imm = 1;
                break;
            }
        }
        op = CMPL;
        imm = 0;
        break;

    case TCG_COND_LT:
    case TCG_COND_GE:
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* Signed comparisons: only the signed immediate form applies.  */
        if (const_arg2) {
            if ((int16_t) arg2 == arg2) {
                op = CMPI;
                imm = 1;
                break;
            }
        }
        op = CMP;
        imm = 0;
        break;

    case TCG_COND_LTU:
    case TCG_COND_GEU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
        /* Unsigned comparisons: only the unsigned immediate form applies. */
        if (const_arg2) {
            if ((uint16_t) arg2 == arg2) {
                op = CMPLI;
                imm = 1;
                break;
            }
        }
        op = CMPL;
        imm = 0;
        break;

    default:
        tcg_abort();
    }
    /* Select the target CR field and, for I64, the 64-bit compare.  */
    op |= BF(cr) | ((type == TCG_TYPE_I64) << 21);

    if (imm) {
        tcg_out32(s, op | RA(arg1) | (arg2 & 0xffff));
    } else {
        if (const_arg2) {
            /* Constant does not fit the immediate field: use R0.  */
            tcg_out_movi(s, type, TCG_REG_R0, arg2);
            arg2 = TCG_REG_R0;
        }
        tcg_out32(s, op | RA(arg1) | RB(arg2));
    }
}
/* Set DST = (SRC == 0).
   cntlz{w,d} returns the type width iff all bits are zero; shifting
   right by log2(width) leaves exactly 1 in that case, else 0.  */
static void tcg_out_setcond_eq0(TCGContext *s, TCGType type,
                                TCGReg dst, TCGReg src)
{
    uint32_t cntlz = CNTLZW;
    int shift = 5;

    if (type == TCG_TYPE_I64) {
        cntlz = CNTLZD;
        shift = 6;
    }
    tcg_out32(s, cntlz | RS(src) | RA(dst));
    tcg_out_shri64(s, dst, dst, shift);
}
/* Set DST = (SRC != 0).
   X != 0 implies X + -1 generates a carry.  Extra addition
   trickery means: R = X-1 + ~X + C = X-1 + (-X+1) + C = C.
   The ADDIC result must not clobber SRC before the SUBFE reads it,
   so spill to R0 when DST aliases SRC.  */
static void tcg_out_setcond_ne0(TCGContext *s, TCGReg dst, TCGReg src)
{
    TCGReg tmp = (dst == src ? TCG_REG_R0 : dst);

    tcg_out32(s, ADDIC | TAI(tmp, src, -1));
    tcg_out32(s, SUBFE | TAB(dst, tmp, src));
}
static TCGReg tcg_gen_setcond_xor(TCGContext *s, TCGReg arg1, TCGArg arg2,
|
1173 |
bool const_arg2)
|
1174 |
{ |
1175 |
if (const_arg2) {
|
1176 |
if ((uint32_t)arg2 == arg2) {
|
1177 |
tcg_out_xori32(s, TCG_REG_R0, arg1, arg2); |
1178 |
} else {
|
1179 |
tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, arg2); |
1180 |
tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, TCG_REG_R0)); |
1181 |
} |
1182 |
} else {
|
1183 |
tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, arg2)); |
1184 |
} |
1185 |
return TCG_REG_R0;
|
1186 |
} |
1187 |
|
1188 |
/* Generate ARG0 = (ARG1 COND ARG2) producing 0 or 1.
 *
 * Strategy, in order of preference:
 *   1. special-case comparisons against zero (cntlz / carry / sign-bit);
 *   2. use ISEL when the CPU has it (3-4 insns total);
 *   3. otherwise compare into CR7, move the CR field to R0 with MFOCRF
 *      and isolate the relevant bit with a rotate-and-mask.
 * Uses R0 as scratch throughout.
 */
static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGArg arg0, TCGArg arg1, TCGArg arg2,
                            int const_arg2)
{
    int crop, sh;

    /* Ignore high bits of a potential constant arg2. */
    if (type == TCG_TYPE_I32) {
        arg2 = (uint32_t)arg2;
    }

    /* Handle common and trivial cases before handling anything else. */
    if (arg2 == 0) {
        switch (cond) {
        case TCG_COND_EQ:
            tcg_out_setcond_eq0(s, type, arg0, arg1);
            return;
        case TCG_COND_NE:
            if (type == TCG_TYPE_I32) {
                /* The carry trick needs the full-register value.  */
                tcg_out_ext32u(s, TCG_REG_R0, arg1);
                arg1 = TCG_REG_R0;
            }
            tcg_out_setcond_ne0(s, arg0, arg1);
            return;
        case TCG_COND_GE:
            /* arg1 >= 0  ==  sign bit of ~arg1.  */
            tcg_out32(s, NOR | SAB(arg1, arg0, arg1));
            arg1 = arg0;
            /* FALLTHRU */
        case TCG_COND_LT:
            /* Extract the sign bit. */
            tcg_out_rld(s, RLDICL, arg0, arg1,
                        type == TCG_TYPE_I64 ? 1 : 33, 63);
            return;
        default:
            break;
        }
    }

    /* If we have ISEL, we can implement everything with 3 or 4 insns.
       All other cases below are also at least 3 insns, so speed up the
       code generator by not considering them and always using ISEL. */
    if (HAVE_ISEL) {
        int isel, tab;

        tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);

        isel = tcg_to_isel[cond];

        /* Bit 0 of the isel encoding flags an inverted condition.  */
        tcg_out_movi(s, type, arg0, 1);
        if (isel & 1) {
            /* arg0 = (bc ? 0 : 1) */
            tab = TAB(arg0, 0, arg0);
            isel &= ~1;
        } else {
            /* arg0 = (bc ? 1 : 0) */
            tcg_out_movi(s, type, TCG_REG_R0, 0);
            tab = TAB(arg0, arg0, TCG_REG_R0);
        }
        tcg_out32(s, isel | tab);
        return;
    }

    switch (cond) {
    case TCG_COND_EQ:
        arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
        tcg_out_setcond_eq0(s, type, arg0, arg1);
        return;

    case TCG_COND_NE:
        arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
        /* Discard the high bits only once, rather than both inputs. */
        if (type == TCG_TYPE_I32) {
            tcg_out_ext32u(s, TCG_REG_R0, arg1);
            arg1 = TCG_REG_R0;
        }
        tcg_out_setcond_ne0(s, arg0, arg1);
        return;

    /* For the remaining conditions: compare into CR7, optionally combine
       two CR bits with CRNOR, then extract one bit of the CR field.
       SH is the rotate count selecting the bit after MFOCRF.  */
    case TCG_COND_GT:
    case TCG_COND_GTU:
        sh = 30;
        crop = 0;
        goto crtest;

    case TCG_COND_LT:
    case TCG_COND_LTU:
        sh = 29;
        crop = 0;
        goto crtest;

    case TCG_COND_GE:
    case TCG_COND_GEU:
        sh = 31;
        /* GE == !(LT): crnor eq, lt, lt  */
        crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_LT) | BB(7, CR_LT);
        goto crtest;

    case TCG_COND_LE:
    case TCG_COND_LEU:
        sh = 31;
        /* LE == !(GT): crnor eq, gt, gt  */
        crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_GT) | BB(7, CR_GT);
    crtest:
        tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
        if (crop) {
            tcg_out32(s, crop);
        }
        tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
        tcg_out_rlw(s, RLWINM, arg0, TCG_REG_R0, sh, 31, 31);
        break;

    default:
        tcg_abort();
    }
}
/* Emit conditional-branch insn BC to LABEL_INDEX.
 *
 * If the label is already resolved, encode the 14-bit relative
 * displacement directly.  Otherwise emit the insn preserving whatever
 * displacement bits are already in the buffer (so chained relocations
 * are not lost) and record an R_PPC_REL14 reloc to patch later.
 */
static void tcg_out_bc(TCGContext *s, int bc, int label_index)
{
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value) {
        tcg_out32(s, bc | reloc_pc14_val(s->code_ptr, l->u.value));
    } else {
        /* NOTE(review): reads the low halfword of the not-yet-written
           insn slot at code_ptr+2 — assumes big-endian instruction
           layout in the buffer; confirm if an LE host is targeted.  */
        uint16_t val = *(uint16_t *) &s->code_ptr[2];

        /* Thanks to Andrzej Zaborowski */
        tcg_out32(s, bc | (val & 0xfffc));
        tcg_out_reloc(s, s->code_ptr - 4, R_PPC_REL14, label_index, 0);
    }
}
/* Emit a conditional branch to LABEL_INDEX: compare ARG1 against ARG2
   into CR7, then branch on COND's bit of that field.  */
static void tcg_out_brcond(TCGContext *s, TCGCond cond,
                           TCGArg arg1, TCGArg arg2, int const_arg2,
                           int label_index, TCGType type)
{
    int bc = tcg_to_bc[cond];

    tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
    tcg_out_bc(s, bc, label_index);
}
/* Generate DEST = (C1 COND C2) ? V1 : V2.
 *
 * With ISEL this is the compare plus one isel insn (isel can select the
 * constant 0 directly for V1; a zero V2 must be materialized in R0).
 * Without ISEL, move one value into DEST and conditionally branch over
 * the move of the other.
 */
static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGArg dest, TCGArg c1, TCGArg c2, TCGArg v1,
                            TCGArg v2, bool const_c2)
{
    /* If for some reason both inputs are zero, don't produce bad code. */
    if (v1 == 0 && v2 == 0) {
        tcg_out_movi(s, type, dest, 0);
        return;
    }

    tcg_out_cmp(s, cond, c1, c2, const_c2, 7, type);

    if (HAVE_ISEL) {
        int isel = tcg_to_isel[cond];

        /* Swap the V operands if the operation indicates inversion. */
        if (isel & 1) {
            int t = v1;
            v1 = v2;
            v2 = t;
            isel &= ~1;
        }
        /* V1 == 0 is handled by isel; V2 == 0 must be handled by hand. */
        if (v2 == 0) {
            tcg_out_movi(s, type, TCG_REG_R0, 0);
        }
        tcg_out32(s, isel | TAB(dest, v1, v2));
    } else {
        if (dest == v2) {
            /* DEST already holds the "false" value: invert the condition
               so only the "true" move remains to be guarded.  */
            cond = tcg_invert_cond(cond);
            v2 = v1;
        } else if (dest != v1) {
            if (v1 == 0) {
                tcg_out_movi(s, type, dest, 0);
            } else {
                tcg_out_mov(s, type, dest, v1);
            }
        }
        /* Branch forward over one insn */
        tcg_out32(s, tcg_to_bc[cond] | 8);
        if (v2 == 0) {
            tcg_out_movi(s, type, dest, 0);
        } else {
            tcg_out_mov(s, type, dest, v2);
        }
    }
}
void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr) |
1374 |
{ |
1375 |
TCGContext s; |
1376 |
unsigned long patch_size; |
1377 |
|
1378 |
s.code_ptr = (uint8_t *) jmp_addr; |
1379 |
tcg_out_b(&s, 0, addr);
|
1380 |
patch_size = s.code_ptr - (uint8_t *) jmp_addr; |
1381 |
flush_icache_range(jmp_addr, jmp_addr + patch_size); |
1382 |
} |
1383 |
|
1384 |
/* Emit host code for one TCG opcode OPC with operands ARGS;
 * CONST_ARGS flags which operands are constants.
 *
 * R0 is used freely as scratch throughout.  Operands whose constraints
 * allow constants are folded into immediate instruction forms where the
 * PowerPC encoding permits.
 *
 * Fix vs. previous revision: the bswap64 tail compared the scratch
 * tracking variable against the literal 0 instead of TCG_REG_R0 (they
 * coincide only because R0 is enumerator 0); it now matches the
 * bswap32 case and states the intent explicitly.
 */
static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                       const int *const_args)
{
    TCGArg a0, a1, a2;
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        /* Return value in R3, then jump back to the epilogue.  */
        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R3, args[0]);
        tcg_out_b(s, 0, (tcg_target_long)tb_ret_addr);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* Direct jump method.  Reserve room for the patchable
               branch sequence written by ppc_tb_set_jmp_target.  */
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            s->code_ptr += 28;
        } else {
            /* Indirect jump method. */
            tcg_abort();
        }
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_br:
        {
            TCGLabel *l = &s->labels[args[0]];

            if (l->has_value) {
                tcg_out_b(s, 0, l->u.value);
            } else {
                /* Preserve existing displacement bits and record a
                   reloc for later patching.  */
                uint32_t val = *(uint32_t *) s->code_ptr;

                /* Thanks to Andrzej Zaborowski */
                tcg_out32(s, B | (val & 0x3fffffc));
                tcg_out_reloc(s, s->code_ptr - 4, R_PPC_REL24, args[0], 0);
            }
        }
        break;
    case INDEX_op_call:
        tcg_out_call(s, args[0], const_args[0]);
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_movi_i64:
        tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
        break;

    /* Loads: zero-extending forms directly; sign extension needs a
       follow-up EXTSB (no lbza insn) or the LHA/LWA forms.  */
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], LBZ, LBZX);
        break;
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], LBZ, LBZX);
        tcg_out32(s, EXTSB | RS(args[0]) | RA(args[0]));
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], LHZ, LHZX);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], LHA, LHAX);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], LWZ, LWZX);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_ldsta(s, args[0], args[1], args[2], LWA, LWAX);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldsta(s, args[0], args[1], args[2], LD, LDX);
        break;
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], STB, STBX);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], STH, STHX);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], STW, STWX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldsta(s, args[0], args[1], args[2], STD, STDX);
        break;

    case INDEX_op_add_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            int32_t l, h;
        do_addi_32:
            /* Split the constant into a signed low 16 bits plus a
               high part for ADDIS.  */
            l = (int16_t)a2;
            h = a2 - l;
            if (h) {
                tcg_out32(s, ADDIS | TAI(a0, a1, h >> 16));
                a1 = a0;
            }
            if (l || a0 != a1) {
                tcg_out32(s, ADDI | TAI(a0, a1, l));
            }
        } else {
            tcg_out32(s, ADD | TAB(a0, a1, a2));
        }
        break;
    case INDEX_op_sub_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I32, a0, a1 - a2);
            } else {
                tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
            }
        } else if (const_args[2]) {
            a2 = -a2;
            goto do_addi_32;
        } else {
            tcg_out32(s, SUBF | TAB(a0, a2, a1));
        }
        break;

    case INDEX_op_and_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi32(s, a0, a1, a2);
        } else {
            tcg_out32(s, AND | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_and_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi64(s, a0, a1, a2);
        } else {
            tcg_out32(s, AND | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_or_i64:
    case INDEX_op_or_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_ori32(s, a0, a1, a2);
        } else {
            tcg_out32(s, OR | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_xor_i64:
    case INDEX_op_xor_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_xori32(s, a0, a1, a2);
        } else {
            tcg_out32(s, XOR | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_andc_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi32(s, a0, a1, ~a2);
        } else {
            tcg_out32(s, ANDC | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_andc_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi64(s, a0, a1, ~a2);
        } else {
            tcg_out32(s, ANDC | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_orc_i32:
        if (const_args[2]) {
            tcg_out_ori32(s, args[0], args[1], ~args[2]);
            break;
        }
        /* FALLTHRU */
    case INDEX_op_orc_i64:
        tcg_out32(s, ORC | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_eqv_i32:
        if (const_args[2]) {
            tcg_out_xori32(s, args[0], args[1], ~args[2]);
            break;
        }
        /* FALLTHRU */
    case INDEX_op_eqv_i64:
        tcg_out32(s, EQV | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_nand_i32:
    case INDEX_op_nand_i64:
        tcg_out32(s, NAND | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
        tcg_out32(s, NOR | SAB(args[1], args[0], args[2]));
        break;

    case INDEX_op_mul_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out32(s, MULLI | TAI(a0, a1, a2));
        } else {
            tcg_out32(s, MULLW | TAB(a0, a1, a2));
        }
        break;

    case INDEX_op_div_i32:
        tcg_out32(s, DIVW | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_divu_i32:
        tcg_out32(s, DIVWU | TAB(args[0], args[1], args[2]));
        break;

    /* 32-bit shifts with constant counts map onto rlwinm forms.  */
    case INDEX_op_shl_i32:
        if (const_args[2]) {
            tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31 - args[2]);
        } else {
            tcg_out32(s, SLW | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_shr_i32:
        if (const_args[2]) {
            tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], args[2], 31);
        } else {
            tcg_out32(s, SRW | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_sar_i32:
        if (const_args[2]) {
            tcg_out32(s, SRAWI | RS(args[1]) | RA(args[0]) | SH(args[2]));
        } else {
            tcg_out32(s, SRAW | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31);
        } else {
            tcg_out32(s, RLWNM | SAB(args[1], args[0], args[2])
                         | MB(0) | ME(31));
        }
        break;
    case INDEX_op_rotr_i32:
        if (const_args[2]) {
            tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], 0, 31);
        } else {
            /* Rotate right by N == rotate left by 32 - N.  */
            tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 32));
            tcg_out32(s, RLWNM | SAB(args[1], args[0], TCG_REG_R0)
                         | MB(0) | ME(31));
        }
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
                       args[3], TCG_TYPE_I32);
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
                       args[3], TCG_TYPE_I64);
        break;

    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
        tcg_out32(s, NEG | RT(args[0]) | RA(args[1]));
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        tcg_out32(s, NOR | SAB(args[1], args[0], args[1]));
        break;

    case INDEX_op_add_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            int32_t l0, h1, h2;
        do_addi_64:
            /* We can always split any 32-bit signed constant into 3 pieces.
               Note the positive 0x80000000 coming from the sub_i64 path,
               handled with the same code we need for eg 0x7fff8000.  */
            assert(a2 == (int32_t)a2 || a2 == 0x80000000);
            l0 = (int16_t)a2;
            h1 = a2 - l0;
            h2 = 0;
            if (h1 < 0 && (int64_t)a2 > 0) {
                h2 = 0x40000000;
                h1 = a2 - h2 - l0;
            }
            assert((TCGArg)h2 + h1 + l0 == a2);

            if (h2) {
                tcg_out32(s, ADDIS | TAI(a0, a1, h2 >> 16));
                a1 = a0;
            }
            if (h1) {
                tcg_out32(s, ADDIS | TAI(a0, a1, h1 >> 16));
                a1 = a0;
            }
            if (l0 || a0 != a1) {
                tcg_out32(s, ADDI | TAI(a0, a1, l0));
            }
        } else {
            tcg_out32(s, ADD | TAB(a0, a1, a2));
        }
        break;
    case INDEX_op_sub_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I64, a0, a1 - a2);
            } else {
                tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
            }
        } else if (const_args[2]) {
            a2 = -a2;
            goto do_addi_64;
        } else {
            tcg_out32(s, SUBF | TAB(a0, a2, a1));
        }
        break;

    case INDEX_op_shl_i64:
        if (const_args[2]) {
            tcg_out_shli64(s, args[0], args[1], args[2]);
        } else {
            tcg_out32(s, SLD | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_shr_i64:
        if (const_args[2]) {
            tcg_out_shri64(s, args[0], args[1], args[2]);
        } else {
            tcg_out32(s, SRD | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_sar_i64:
        if (const_args[2]) {
            /* sradi splits its 6-bit shift count: low 5 bits in the SH
               field, the 6th bit at encoding bit 1.  */
            int sh = SH(args[2] & 0x1f) | (((args[2] >> 5) & 1) << 1);
            tcg_out32(s, SRADI | RA(args[0]) | RS(args[1]) | sh);
        } else {
            tcg_out32(s, SRAD | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_rotl_i64:
        if (const_args[2]) {
            tcg_out_rld(s, RLDICL, args[0], args[1], args[2], 0);
        } else {
            tcg_out32(s, RLDCL | SAB(args[1], args[0], args[2]) | MB64(0));
        }
        break;
    case INDEX_op_rotr_i64:
        if (const_args[2]) {
            tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 0);
        } else {
            tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 64));
            tcg_out32(s, RLDCL | SAB(args[1], args[0], TCG_REG_R0) | MB64(0));
        }
        break;

    case INDEX_op_mul_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out32(s, MULLI | TAI(a0, a1, a2));
        } else {
            tcg_out32(s, MULLD | TAB(a0, a1, a2));
        }
        break;
    case INDEX_op_div_i64:
        tcg_out32(s, DIVD | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_divu_i64:
        tcg_out32(s, DIVDU | TAB(args[0], args[1], args[2]));
        break;

    /* Guest memory access: opc low bits encode size, bit 2 sign-extend. */
    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
    case INDEX_op_qemu_ld32u:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld32s:
        tcg_out_qemu_ld(s, args, 2 | 4);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;
    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
        c = EXTSB;
        goto gen_ext;
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
        c = EXTSH;
        goto gen_ext;
    case INDEX_op_ext32s_i64:
        c = EXTSW;
        goto gen_ext;
    gen_ext:
        tcg_out32(s, c | RS(args[1]) | RA(args[0]));
        break;

    case INDEX_op_setcond_i32:
        tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],
                        const_args[2]);
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2],
                        const_args[2]);
        break;

    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
        a0 = args[0], a1 = args[1];
        /* a1 = abcd */
        if (a0 != a1) {
            /* a0 = (a1 r<< 24) & 0xff # 000c */
            tcg_out_rlw(s, RLWINM, a0, a1, 24, 24, 31);
            /* a0 = (a0 & ~0xff00) | (a1 r<< 8) & 0xff00 # 00dc */
            tcg_out_rlw(s, RLWIMI, a0, a1, 8, 16, 23);
        } else {
            /* r0 = (a1 r<< 8) & 0xff00 # 00d0 */
            tcg_out_rlw(s, RLWINM, TCG_REG_R0, a1, 8, 16, 23);
            /* a0 = (a1 r<< 24) & 0xff # 000c */
            tcg_out_rlw(s, RLWINM, a0, a1, 24, 24, 31);
            /* a0 = a0 | r0 # 00dc */
            tcg_out32(s, OR | SAB(TCG_REG_R0, a0, a0));
        }
        break;

    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
        /* Stolen from gcc's builtin_bswap32 */
        a1 = args[1];
        a0 = args[0] == a1 ? TCG_REG_R0 : args[0];

        /* a1 = args[1] # abcd */
        /* a0 = rotate_left (a1, 8) # bcda */
        tcg_out_rlw(s, RLWINM, a0, a1, 8, 0, 31);
        /* a0 = (a0 & ~0xff000000) | ((a1 r<< 24) & 0xff000000) # dcda */
        tcg_out_rlw(s, RLWIMI, a0, a1, 24, 0, 7);
        /* a0 = (a0 & ~0x0000ff00) | ((a1 r<< 24) & 0x0000ff00) # dcba */
        tcg_out_rlw(s, RLWIMI, a0, a1, 24, 16, 23);

        if (a0 == TCG_REG_R0) {
            tcg_out_mov(s, TCG_TYPE_I64, args[0], a0);
        }
        break;

    case INDEX_op_bswap64_i64:
        a0 = args[0], a1 = args[1], a2 = TCG_REG_R0;
        if (a0 == a1) {
            a0 = TCG_REG_R0;
            a2 = a1;
        }

        /* a1 = # abcd efgh */
        /* a0 = rl32(a1, 8) # 0000 fghe */
        tcg_out_rlw(s, RLWINM, a0, a1, 8, 0, 31);
        /* a0 = dep(a0, rl32(a1, 24), 0xff000000) # 0000 hghe */
        tcg_out_rlw(s, RLWIMI, a0, a1, 24, 0, 7);
        /* a0 = dep(a0, rl32(a1, 24), 0x0000ff00) # 0000 hgfe */
        tcg_out_rlw(s, RLWIMI, a0, a1, 24, 16, 23);

        /* a0 = rl64(a0, 32) # hgfe 0000 */
        /* a2 = rl64(a1, 32) # efgh abcd */
        tcg_out_rld(s, RLDICL, a0, a0, 32, 0);
        tcg_out_rld(s, RLDICL, a2, a1, 32, 0);

        /* a0 = dep(a0, rl32(a2, 8), 0xffffffff) # hgfe bcda */
        tcg_out_rlw(s, RLWIMI, a0, a2, 8, 0, 31);
        /* a0 = dep(a0, rl32(a2, 24), 0xff000000) # hgfe dcda */
        tcg_out_rlw(s, RLWIMI, a0, a2, 24, 0, 7);
        /* a0 = dep(a0, rl32(a2, 24), 0x0000ff00) # hgfe dcba */
        tcg_out_rlw(s, RLWIMI, a0, a2, 24, 16, 23);

        /* FIX: test the register, not the literal 0 — the scratch case
           means "the result was built in R0 because a0 aliased a1",
           matching the bswap32 case above.  (Equivalent today only
           because TCG_REG_R0 happens to be 0.)  */
        if (a0 == TCG_REG_R0) {
            tcg_out_mov(s, TCG_TYPE_I64, args[0], a0);
        }
        break;

    case INDEX_op_deposit_i32:
        if (const_args[2]) {
            /* Constrained to constant zero: just clear the field.  */
            uint32_t mask = ((2u << (args[4] - 1)) - 1) << args[3];
            tcg_out_andi32(s, args[0], args[0], ~mask);
        } else {
            tcg_out_rlw(s, RLWIMI, args[0], args[2], args[3],
                        32 - args[3] - args[4], 31 - args[3]);
        }
        break;
    case INDEX_op_deposit_i64:
        if (const_args[2]) {
            uint64_t mask = ((2ull << (args[4] - 1)) - 1) << args[3];
            tcg_out_andi64(s, args[0], args[0], ~mask);
        } else {
            tcg_out_rld(s, RLDIMI, args[0], args[2], args[3],
                        64 - args[3] - args[4]);
        }
        break;

    case INDEX_op_movcond_i32:
        tcg_out_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2],
                        args[3], args[4], const_args[2]);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], args[2],
                        args[3], args[4], const_args[2]);
        break;

    case INDEX_op_add2_i64:
        /* Note that the CA bit is defined based on the word size of the
           environment.  So in 64-bit mode it's always carry-out of bit 63.
           The fallback code using deposit works just as well for 32-bit.  */
        a0 = args[0], a1 = args[1];
        /* Avoid clobbering a high-part input before it is consumed.  */
        if (a0 == args[3] || (!const_args[5] && a0 == args[5])) {
            a0 = TCG_REG_R0;
        }
        if (const_args[4]) {
            tcg_out32(s, ADDIC | TAI(a0, args[2], args[4]));
        } else {
            tcg_out32(s, ADDC | TAB(a0, args[2], args[4]));
        }
        if (const_args[5]) {
            tcg_out32(s, (args[5] ? ADDME : ADDZE) | RT(a1) | RA(args[3]));
        } else {
            tcg_out32(s, ADDE | TAB(a1, args[3], args[5]));
        }
        if (a0 != args[0]) {
            tcg_out_mov(s, TCG_TYPE_I64, args[0], a0);
        }
        break;

    case INDEX_op_sub2_i64:
        a0 = args[0], a1 = args[1];
        if (a0 == args[5] || (!const_args[4] && a0 == args[4])) {
            a0 = TCG_REG_R0;
        }
        if (const_args[2]) {
            tcg_out32(s, SUBFIC | TAI(a0, args[3], args[2]));
        } else {
            tcg_out32(s, SUBFC | TAB(a0, args[3], args[2]));
        }
        if (const_args[4]) {
            tcg_out32(s, (args[4] ? SUBFME : SUBFZE) | RT(a1) | RA(args[5]));
        } else {
            tcg_out32(s, SUBFE | TAB(a1, args[5], args[4]));
        }
        if (a0 != args[0]) {
            tcg_out_mov(s, TCG_TYPE_I64, args[0], a0);
        }
        break;

    case INDEX_op_muluh_i64:
        tcg_out32(s, MULHDU | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_mulsh_i64:
        tcg_out32(s, MULHD | TAB(args[0], args[1], args[2]));
        break;

    default:
        tcg_dump_ops(s);
        tcg_abort();
    }
}
/*
 * Operand-constraint table for every TCG opcode this ppc64 backend
 * implements.  One entry per opcode: the strings give the register/
 * constant constraints for each operand, in operand order.  The table
 * is terminated by the { -1 } sentinel and handed to
 * tcg_add_target_add_op_defs() from tcg_target_init() below.
 *
 * NOTE(review): the single-letter constraint codes ("r", "i", "I", "T",
 * "U", "Z", "M", "L", "S", "0") are decoded by this backend's
 * constraint-parsing hook, which is outside this chunk -- verify their
 * exact meanings there before editing any string.
 */
static const TCGTargetOpDef ppc_op_defs[] = {
    /* Control flow and calls. */
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_br, { } },

    /* Register moves and constant loads. */
    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_mov_i64, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_movi_i64, { "r" } },

    /* Host memory loads/stores (32-bit and shared i64 forms). */
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st8_i64, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st16_i64, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },
    { INDEX_op_st_i64, { "r", "r" } },
    { INDEX_op_st32_i64, { "r", "r" } },

    /* 64-bit loads with explicit width/extension. */
    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },

    /* 32-bit integer arithmetic and logic. */
    { INDEX_op_add_i32, { "r", "r", "ri" } },
    { INDEX_op_mul_i32, { "r", "r", "rI" } },
    { INDEX_op_div_i32, { "r", "r", "r" } },
    { INDEX_op_divu_i32, { "r", "r", "r" } },
    { INDEX_op_sub_i32, { "r", "rI", "ri" } },
    { INDEX_op_and_i32, { "r", "r", "ri" } },
    { INDEX_op_or_i32, { "r", "r", "ri" } },
    { INDEX_op_xor_i32, { "r", "r", "ri" } },
    { INDEX_op_andc_i32, { "r", "r", "ri" } },
    { INDEX_op_orc_i32, { "r", "r", "ri" } },
    { INDEX_op_eqv_i32, { "r", "r", "ri" } },
    { INDEX_op_nand_i32, { "r", "r", "r" } },
    { INDEX_op_nor_i32, { "r", "r", "r" } },

    /* 32-bit shifts and rotates. */
    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    /* Conditional branches. */
    { INDEX_op_brcond_i32, { "r", "ri" } },
    { INDEX_op_brcond_i64, { "r", "ri" } },

    { INDEX_op_neg_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    /* 64-bit integer arithmetic and logic. */
    { INDEX_op_add_i64, { "r", "r", "rT" } },
    { INDEX_op_sub_i64, { "r", "rI", "rT" } },
    { INDEX_op_and_i64, { "r", "r", "ri" } },
    { INDEX_op_or_i64, { "r", "r", "rU" } },
    { INDEX_op_xor_i64, { "r", "r", "rU" } },
    { INDEX_op_andc_i64, { "r", "r", "ri" } },
    { INDEX_op_orc_i64, { "r", "r", "r" } },
    { INDEX_op_eqv_i64, { "r", "r", "r" } },
    { INDEX_op_nand_i64, { "r", "r", "r" } },
    { INDEX_op_nor_i64, { "r", "r", "r" } },

    /* 64-bit shifts and rotates. */
    { INDEX_op_shl_i64, { "r", "r", "ri" } },
    { INDEX_op_shr_i64, { "r", "r", "ri" } },
    { INDEX_op_sar_i64, { "r", "r", "ri" } },
    { INDEX_op_rotl_i64, { "r", "r", "ri" } },
    { INDEX_op_rotr_i64, { "r", "r", "ri" } },

    { INDEX_op_mul_i64, { "r", "r", "rI" } },
    { INDEX_op_div_i64, { "r", "r", "r" } },
    { INDEX_op_divu_i64, { "r", "r", "r" } },

    { INDEX_op_neg_i64, { "r", "r" } },
    { INDEX_op_not_i64, { "r", "r" } },

    /* Guest memory loads (go through the softmmu slow path). */
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld32u, { "r", "L" } },
    { INDEX_op_qemu_ld32s, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "L" } },

    /* Guest memory stores. */
    { INDEX_op_qemu_st8, { "S", "S" } },
    { INDEX_op_qemu_st16, { "S", "S" } },
    { INDEX_op_qemu_st32, { "S", "S" } },
    { INDEX_op_qemu_st64, { "S", "S" } },

    /* Sign extensions. */
    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext8s_i64, { "r", "r" } },
    { INDEX_op_ext16s_i64, { "r", "r" } },
    { INDEX_op_ext32s_i64, { "r", "r" } },

    /* Conditional set/move. */
    { INDEX_op_setcond_i32, { "r", "r", "ri" } },
    { INDEX_op_setcond_i64, { "r", "r", "ri" } },
    { INDEX_op_movcond_i32, { "r", "r", "ri", "rZ", "rZ" } },
    { INDEX_op_movcond_i64, { "r", "r", "ri", "rZ", "rZ" } },

    /* Byte swaps. */
    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap16_i64, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },
    { INDEX_op_bswap32_i64, { "r", "r" } },
    { INDEX_op_bswap64_i64, { "r", "r" } },

    /* Bitfield deposit ("0" ties the destination to operand 1). */
    { INDEX_op_deposit_i32, { "r", "0", "rZ" } },
    { INDEX_op_deposit_i64, { "r", "0", "rZ" } },

    /* Double-word add/sub with carry, and high-part multiplies
       (emitted by the tcg_out_op cases above). */
    { INDEX_op_add2_i64, { "r", "r", "r", "r", "rI", "rZM" } },
    { INDEX_op_sub2_i64, { "r", "r", "rI", "r", "rZM", "r" } },
    { INDEX_op_mulsh_i64, { "r", "r", "r" } },
    { INDEX_op_muluh_i64, { "r", "r", "r" } },

    /* End-of-table sentinel. */
    { -1 },
};
2098 |
|
2099 |
/*
 * One-time backend initialization: detect host ISA level, then tell the
 * register allocator which registers exist, which are clobbered by calls,
 * and which are permanently reserved.  Finally register the opcode
 * constraint table above.
 */
static void tcg_target_init(TCGContext *s)
{
#ifdef CONFIG_GETAUXVAL
    /* Probe the host CPU features through the ELF aux vector. */
    if (getauxval(AT_HWCAP) & PPC_FEATURE_ARCH_2_06) {
        have_isa_2_06 = true;
    }
#endif

    /* All 32 GPRs are usable for both 32- and 64-bit values. */
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);

    /* Registers the ABI allows a callee to clobber: r0 and the argument/
       volatile range r3-r12.  r2 (TOC) is only volatile on Darwin. */
    {
        unsigned clobbers = (1 << TCG_REG_R0)
                          | (1 << TCG_REG_R3)
                          | (1 << TCG_REG_R4)
                          | (1 << TCG_REG_R5)
                          | (1 << TCG_REG_R6)
                          | (1 << TCG_REG_R7)
                          | (1 << TCG_REG_R8)
                          | (1 << TCG_REG_R9)
                          | (1 << TCG_REG_R10)
                          | (1 << TCG_REG_R11)
                          | (1 << TCG_REG_R12);
#ifdef __APPLE__
        clobbers |= 1 << TCG_REG_R2;
#endif
        tcg_regset_set32(tcg_target_call_clobber_regs, 0, clobbers);
    }

    /* Registers the allocator must never hand out: r0 (scratch/zero in
       some encodings), r1 (stack pointer), r2 (TOC, except on Darwin)
       and r13 (thread pointer). */
    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1);
#ifndef __APPLE__
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2);
#endif
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13);

    tcg_add_target_add_op_defs(ppc_op_defs);
}