24 |
24 |
|
25 |
25 |
#ifndef NDEBUG
|
26 |
26 |
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
|
27 |
|
"%r0",
|
28 |
|
"%r1",
|
29 |
|
"%rp",
|
30 |
|
"%r3",
|
31 |
|
"%r4",
|
32 |
|
"%r5",
|
33 |
|
"%r6",
|
34 |
|
"%r7",
|
35 |
|
"%r8",
|
36 |
|
"%r9",
|
37 |
|
"%r10",
|
38 |
|
"%r11",
|
39 |
|
"%r12",
|
40 |
|
"%r13",
|
41 |
|
"%r14",
|
42 |
|
"%r15",
|
43 |
|
"%r16",
|
44 |
|
"%r17",
|
45 |
|
"%r18",
|
46 |
|
"%r19",
|
47 |
|
"%r20",
|
48 |
|
"%r21",
|
49 |
|
"%r22",
|
50 |
|
"%r23",
|
51 |
|
"%r24",
|
52 |
|
"%r25",
|
53 |
|
"%r26",
|
54 |
|
"%dp",
|
55 |
|
"%ret0",
|
56 |
|
"%ret1",
|
57 |
|
"%sp",
|
58 |
|
"%r31",
|
|
27 |
"%r0", "%r1", "%rp", "%r3", "%r4", "%r5", "%r6", "%r7",
|
|
28 |
"%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
|
|
29 |
"%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
|
|
30 |
"%r24", "%r25", "%r26", "%dp", "%ret0", "%ret1", "%sp", "%r31",
|
59 |
31 |
};
|
60 |
32 |
#endif
|
61 |
33 |
|
|
/* This is an 8 byte temp slot in the stack frame.  */
#define STACK_TEMP_OFS -16

#ifndef GUEST_BASE
#define GUEST_BASE 0
#endif

#ifdef CONFIG_USE_GUEST_BASE
/* Reserve a call-saved register to hold GUEST_BASE.  */
#define TCG_GUEST_BASE_REG TCG_REG_R16
#else
/* %r0 always reads as zero, so adding it is a no-op.  */
#define TCG_GUEST_BASE_REG TCG_REG_R0
#endif
|
46 |
|
62 |
47 |
static const int tcg_target_reg_alloc_order[] = {
|
63 |
48 |
TCG_REG_R4,
|
64 |
49 |
TCG_REG_R5,
|
... | ... | |
75 |
60 |
TCG_REG_R14,
|
76 |
61 |
TCG_REG_R15,
|
77 |
62 |
TCG_REG_R16,
|
|
63 |
|
|
64 |
TCG_REG_R26,
|
|
65 |
TCG_REG_R25,
|
|
66 |
TCG_REG_R24,
|
|
67 |
TCG_REG_R23,
|
|
68 |
|
|
69 |
TCG_REG_RET0,
|
|
70 |
TCG_REG_RET1,
|
78 |
71 |
};
|
79 |
72 |
|
80 |
73 |
static const int tcg_target_call_iarg_regs[4] = {
|
... | ... | |
89 |
82 |
TCG_REG_RET1,
|
90 |
83 |
};
|
91 |
84 |
|
|
85 |
/* True iff val fits a signed field of width BITS. */
|
|
86 |
static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
|
|
87 |
{
|
|
88 |
return (val << ((sizeof(tcg_target_long) * 8 - bits))
|
|
89 |
>> (sizeof(tcg_target_long) * 8 - bits)) == val;
|
|
90 |
}
|
|
91 |
|
|
92 |
/* True iff depi can be used to compute (reg | MASK).
|
|
93 |
Accept a bit pattern like:
|
|
94 |
0....01....1
|
|
95 |
1....10....0
|
|
96 |
0..01..10..0
|
|
97 |
Copied from gcc sources. */
|
|
98 |
static inline int or_mask_p(tcg_target_ulong mask)
|
|
99 |
{
|
|
100 |
mask += mask & -mask;
|
|
101 |
return (mask & (mask - 1)) == 0;
|
|
102 |
}
|
|
103 |
|
|
104 |
/* True iff depi or extru can be used to compute (reg & mask).
|
|
105 |
Accept a bit pattern like these:
|
|
106 |
0....01....1
|
|
107 |
1....10....0
|
|
108 |
1..10..01..1
|
|
109 |
Copied from gcc sources. */
|
|
110 |
static inline int and_mask_p(tcg_target_ulong mask)
|
|
111 |
{
|
|
112 |
return or_mask_p(~mask);
|
|
113 |
}
|
|
114 |
|
|
115 |
/* Encode VAL as a PA-RISC "low sign extend" immediate of LEN bits:
   the sign bit is rotated from the top of the field to bit 0 and
   the magnitude occupies bits LEN-1..1.  */
static int low_sign_ext(int val, int len)
{
    return (((val << 1) & ~(-1u << len)) | ((val >> (len - 1)) & 1));
}
|
|
119 |
|
|
120 |
/* Scramble a 12-bit signed branch displacement into the PA-RISC
   instruction encoding (sign bit to bit 0, bit 10 to bit 2,
   low 10 bits to bits 12..3).  */
static int reassemble_12(int as12)
{
    return (((as12 & 0x800) >> 11) |
            ((as12 & 0x400) >> 8) |
            ((as12 & 0x3ff) << 3));
}
|
|
126 |
|
|
127 |
/* Scramble a 17-bit signed branch displacement into the PA-RISC
   instruction encoding.  */
static int reassemble_17(int as17)
{
    return (((as17 & 0x10000) >> 16) |
            ((as17 & 0x0f800) << 5) |
            ((as17 & 0x00400) >> 8) |
            ((as17 & 0x003ff) << 3));
}
|
|
134 |
|
|
135 |
/* Scramble a 21-bit immediate into the PA-RISC LDIL/ADDIL field
   encoding.  */
static int reassemble_21(int as21)
{
    return (((as21 & 0x100000) >> 20) |
            ((as21 & 0x0ffe00) >> 8) |
            ((as21 & 0x000180) << 7) |
            ((as21 & 0x00007c) << 14) |
            ((as21 & 0x000003) << 12));
}
|
|
143 |
|
|
144 |
/* ??? Bizarrely, there is no PCREL12F relocation type.  I guess all
   such relocations are simply fully handled by the assembler.  */
#define R_PARISC_PCREL12F R_PARISC_NONE
|
|
147 |
|
92 |
148 |
/* Resolve a relocation recorded against CODE_PTR: rewrite the branch
   displacement field in place once the target address is known.  */
static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    uint32_t *insn_ptr = (uint32_t *)code_ptr;
    uint32_t insn = *insn_ptr;
    tcg_target_long pcrel;

    value += addend;
    /* PA-RISC branch displacements are relative to PC + 8, in words.  */
    pcrel = (value - ((tcg_target_long)code_ptr + 8)) >> 2;

    switch (type) {
    case R_PARISC_PCREL12F:
        assert(check_fit_tl(pcrel, 12));
        /* ??? We assume all patches are forward.  See tcg_out_brcond
           re setting the NUL bit on the branch and eliding the nop.  */
        assert(pcrel >= 0);
        insn &= ~0x1ffdu;
        insn |= reassemble_12(pcrel);
        break;
    case R_PARISC_PCREL17F:
        assert(check_fit_tl(pcrel, 17));
        insn &= ~0x1f1ffdu;
        insn |= reassemble_17(pcrel);
        break;
    default:
        tcg_abort();
    }

    *insn_ptr = insn;
}
|
103 |
178 |
|
104 |
179 |
/* maximum number of register used for input function arguments */
|
... | ... | |
126 |
201 |
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R24);
|
127 |
202 |
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R23);
|
128 |
203 |
break;
|
|
204 |
case 'Z':
|
|
205 |
ct->ct |= TCG_CT_CONST_0;
|
|
206 |
break;
|
|
207 |
case 'I':
|
|
208 |
ct->ct |= TCG_CT_CONST_S11;
|
|
209 |
break;
|
|
210 |
case 'J':
|
|
211 |
ct->ct |= TCG_CT_CONST_S5;
|
|
212 |
break;
|
129 |
213 |
default:
|
130 |
214 |
return -1;
|
131 |
215 |
}
|
... | ... | |
135 |
219 |
}
|
136 |
220 |
|
137 |
221 |
/* test if a constant matches the constraint */
|
138 |
|
static inline int tcg_target_const_match(tcg_target_long val,
|
139 |
|
const TCGArgConstraint *arg_ct)
|
|
222 |
static int tcg_target_const_match(tcg_target_long val,
|
|
223 |
const TCGArgConstraint *arg_ct)
|
140 |
224 |
{
|
141 |
|
int ct;
|
142 |
|
|
143 |
|
ct = arg_ct->ct;
|
144 |
|
|
145 |
|
/* TODO */
|
146 |
|
|
|
225 |
int ct = arg_ct->ct;
|
|
226 |
if (ct & TCG_CT_CONST) {
|
|
227 |
return 1;
|
|
228 |
} else if (ct & TCG_CT_CONST_0) {
|
|
229 |
return val == 0;
|
|
230 |
} else if (ct & TCG_CT_CONST_S5) {
|
|
231 |
return check_fit_tl(val, 5);
|
|
232 |
} else if (ct & TCG_CT_CONST_S11) {
|
|
233 |
return check_fit_tl(val, 11);
|
|
234 |
}
|
147 |
235 |
return 0;
|
148 |
236 |
}
|
149 |
237 |
|
... | ... | |
163 |
251 |
#define INSN_SHDEP_CP(x) ((31 - (x)) << 5)
#define INSN_SHDEP_P(x)  ((x) << 5)
#define INSN_COND(x)     ((x) << 13)
#define INSN_IM11(x)     low_sign_ext(x, 11)
#define INSN_IM14(x)     low_sign_ext(x, 14)
#define INSN_IM5(x)      (low_sign_ext(x, 5) << 16)

/* PA-RISC compare/subtract condition codes; COND_FALSE flags a
   condition that must be expressed by negating its complement.  */
#define COND_NEVER 0
#define COND_EQ    1
#define COND_LT    2
#define COND_LE    3
#define COND_LTU   4
#define COND_LEU   5
#define COND_SV    6
#define COND_OD    7
#define COND_FALSE 8

/* Arithmetic / logical instruction opcodes.  */
#define INSN_ADD     (INSN_OP(0x02) | INSN_EXT6(0x18))
#define INSN_ADDC    (INSN_OP(0x02) | INSN_EXT6(0x1c))
#define INSN_ADDI    (INSN_OP(0x2d))
#define INSN_ADDIL   (INSN_OP(0x0a))
#define INSN_ADDL    (INSN_OP(0x02) | INSN_EXT6(0x28))
#define INSN_AND     (INSN_OP(0x02) | INSN_EXT6(0x08))
#define INSN_ANDCM   (INSN_OP(0x02) | INSN_EXT6(0x00))
#define INSN_COMCLR  (INSN_OP(0x02) | INSN_EXT6(0x22))
#define INSN_COMICLR (INSN_OP(0x24))
#define INSN_DEP     (INSN_OP(0x35) | INSN_EXT3SH(3))
#define INSN_DEPI    (INSN_OP(0x35) | INSN_EXT3SH(7))
#define INSN_EXTRS   (INSN_OP(0x34) | INSN_EXT3SH(7))
#define INSN_EXTRU   (INSN_OP(0x34) | INSN_EXT3SH(6))
#define INSN_LDIL    (INSN_OP(0x08))
#define INSN_LDO     (INSN_OP(0x0d))
#define INSN_MTCTL   (INSN_OP(0x00) | INSN_EXT8B(0xc2))
#define INSN_OR      (INSN_OP(0x02) | INSN_EXT6(0x09))
#define INSN_SHD     (INSN_OP(0x34) | INSN_EXT3SH(2))
#define INSN_SUB     (INSN_OP(0x02) | INSN_EXT6(0x10))
#define INSN_SUBB    (INSN_OP(0x02) | INSN_EXT6(0x14))
#define INSN_SUBI    (INSN_OP(0x25))
#define INSN_VEXTRS  (INSN_OP(0x34) | INSN_EXT3SH(5))
#define INSN_VEXTRU  (INSN_OP(0x34) | INSN_EXT3SH(4))
#define INSN_VSHD    (INSN_OP(0x34) | INSN_EXT3SH(0))
#define INSN_XOR     (INSN_OP(0x02) | INSN_EXT6(0x0a))
#define INSN_ZDEP    (INSN_OP(0x35) | INSN_EXT3SH(2))
#define INSN_ZVDEP   (INSN_OP(0x35) | INSN_EXT3SH(0))

/* Branch opcodes; the "| 2" forms set the nullify bit.  */
#define INSN_BL      (INSN_OP(0x3a) | INSN_EXT3BR(0))
#define INSN_BL_N    (INSN_OP(0x3a) | INSN_EXT3BR(0) | 2)
#define INSN_BLR     (INSN_OP(0x3a) | INSN_EXT3BR(2))
#define INSN_BV      (INSN_OP(0x3a) | INSN_EXT3BR(6))
#define INSN_BV_N    (INSN_OP(0x3a) | INSN_EXT3BR(6) | 2)
#define INSN_BLE_SR4 (INSN_OP(0x39) | (1 << 13))

/* Loads (displacement form).  */
#define INSN_LDB     (INSN_OP(0x10))
#define INSN_LDH     (INSN_OP(0x11))
#define INSN_LDW     (INSN_OP(0x12))
#define INSN_LDWM    (INSN_OP(0x13))
#define INSN_FLDDS   (INSN_OP(0x0b) | INSN_EXT4(0) | (1 << 12))

/* Loads (indexed form).  */
#define INSN_LDBX    (INSN_OP(0x03) | INSN_EXT4(0))
#define INSN_LDHX    (INSN_OP(0x03) | INSN_EXT4(1))
#define INSN_LDWX    (INSN_OP(0x03) | INSN_EXT4(2))

/* Stores.  */
#define INSN_STB     (INSN_OP(0x18))
#define INSN_STH     (INSN_OP(0x19))
#define INSN_STW     (INSN_OP(0x1a))
#define INSN_STWM    (INSN_OP(0x1b))
#define INSN_FSTDS   (INSN_OP(0x0b) | INSN_EXT4(8) | (1 << 12))

/* Compare-and-branch.  */
#define INSN_COMBT   (INSN_OP(0x20))
#define INSN_COMBF   (INSN_OP(0x22))
#define INSN_COMIBT  (INSN_OP(0x21))
#define INSN_COMIBF  (INSN_OP(0x23))

/* supplied by libgcc */
extern void *__canonicalize_funcptr_for_compare(void *);
|
|
326 |
|
|
327 |
/* Emit a register-to-register copy; elided when RET == ARG.  */
static void tcg_out_mov(TCGContext *s, int ret, int arg)
{
    /* PA1.1 defines COPY as OR r,0,t; PA2.0 defines COPY as LDO 0(r),t
       but hppa-dis.c is unaware of this definition */
    if (ret != arg) {
        tcg_out32(s, INSN_OR | INSN_T(ret) | INSN_R1(arg)
                  | INSN_R2(TCG_REG_R0));
    }
}
|
166 |
336 |
|
167 |
|
#define COND_NEVER 0
|
168 |
|
#define COND_EQUAL 1
|
169 |
|
#define COND_LT 2
|
170 |
|
#define COND_LTEQ 3
|
171 |
|
#define COND_LTU 4
|
172 |
|
#define COND_LTUEQ 5
|
173 |
|
#define COND_SV 6
|
174 |
|
#define COND_OD 7
|
|
337 |
/* Load the constant ARG into register RET, using a single LDO when it
   fits 14 signed bits, otherwise LDIL for the high 21 bits plus an
   optional LDO for the low 11 bits.  */
static void tcg_out_movi(TCGContext *s, TCGType type,
                         int ret, tcg_target_long arg)
{
    if (check_fit_tl(arg, 14)) {
        tcg_out32(s, INSN_LDO | INSN_R1(ret)
                  | INSN_R2(TCG_REG_R0) | INSN_IM14(arg));
    } else {
        uint32_t hi, lo;
        hi = arg >> 11;
        lo = arg & 0x7ff;

        tcg_out32(s, INSN_LDIL | INSN_R2(ret) | reassemble_21(hi));
        if (lo) {
            tcg_out32(s, INSN_LDO | INSN_R1(ret)
                      | INSN_R2(ret) | INSN_IM14(lo));
        }
    }
}
|
175 |
355 |
|
|
356 |
static void tcg_out_ldst(TCGContext *s, int ret, int addr,
|
|
357 |
tcg_target_long offset, int op)
|
|
358 |
{
|
|
359 |
if (!check_fit_tl(offset, 14)) {
|
|
360 |
uint32_t hi, lo, op;
|
176 |
361 |
|
177 |
|
/* Logical ADD */
|
178 |
|
#define ARITH_ADD (INSN_OP(0x02) | INSN_EXT6(0x28))
|
179 |
|
#define ARITH_AND (INSN_OP(0x02) | INSN_EXT6(0x08))
|
180 |
|
#define ARITH_OR (INSN_OP(0x02) | INSN_EXT6(0x09))
|
181 |
|
#define ARITH_XOR (INSN_OP(0x02) | INSN_EXT6(0x0a))
|
182 |
|
#define ARITH_SUB (INSN_OP(0x02) | INSN_EXT6(0x10))
|
|
362 |
hi = offset >> 11;
|
|
363 |
lo = offset & 0x7ff;
|
183 |
364 |
|
184 |
|
#define SHD (INSN_OP(0x34) | INSN_EXT3SH(2))
|
185 |
|
#define VSHD (INSN_OP(0x34) | INSN_EXT3SH(0))
|
186 |
|
#define DEP (INSN_OP(0x35) | INSN_EXT3SH(3))
|
187 |
|
#define ZDEP (INSN_OP(0x35) | INSN_EXT3SH(2))
|
188 |
|
#define ZVDEP (INSN_OP(0x35) | INSN_EXT3SH(0))
|
189 |
|
#define EXTRU (INSN_OP(0x34) | INSN_EXT3SH(6))
|
190 |
|
#define EXTRS (INSN_OP(0x34) | INSN_EXT3SH(7))
|
191 |
|
#define VEXTRS (INSN_OP(0x34) | INSN_EXT3SH(5))
|
|
365 |
if (addr == TCG_REG_R0) {
|
|
366 |
op = INSN_LDIL | INSN_R2(TCG_REG_R1);
|
|
367 |
} else {
|
|
368 |
op = INSN_ADDIL | INSN_R2(addr);
|
|
369 |
}
|
|
370 |
tcg_out32(s, op | reassemble_21(hi));
|
192 |
371 |
|
193 |
|
#define SUBI (INSN_OP(0x25))
|
194 |
|
#define MTCTL (INSN_OP(0x00) | INSN_EXT8B(0xc2))
|
|
372 |
addr = TCG_REG_R1;
|
|
373 |
offset = lo;
|
|
374 |
}
|
195 |
375 |
|
196 |
|
#define BL (INSN_OP(0x3a) | INSN_EXT3BR(0))
|
197 |
|
#define BLE_SR4 (INSN_OP(0x39) | (1 << 13))
|
198 |
|
#define BV (INSN_OP(0x3a) | INSN_EXT3BR(6))
|
199 |
|
#define BV_N (INSN_OP(0x3a) | INSN_EXT3BR(6) | 2)
|
200 |
|
#define LDIL (INSN_OP(0x08))
|
201 |
|
#define LDO (INSN_OP(0x0d))
|
|
376 |
if (ret != addr || offset != 0 || op != INSN_LDO) {
|
|
377 |
tcg_out32(s, op | INSN_R1(ret) | INSN_R2(addr) | INSN_IM14(offset));
|
|
378 |
}
|
|
379 |
}
|
202 |
380 |
|
203 |
|
#define LDB (INSN_OP(0x10))
|
204 |
|
#define LDH (INSN_OP(0x11))
|
205 |
|
#define LDW (INSN_OP(0x12))
|
206 |
|
#define LDWM (INSN_OP(0x13))
|
|
381 |
/* This function is required by tcg.c. */
|
|
382 |
static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
|
|
383 |
int arg1, tcg_target_long arg2)
|
|
384 |
{
|
|
385 |
tcg_out_ldst(s, ret, arg1, arg2, INSN_LDW);
|
|
386 |
}
|
|
387 |
|
|
388 |
/* This function is required by tcg.c. */
|
|
389 |
static inline void tcg_out_st(TCGContext *s, TCGType type, int ret,
|
|
390 |
int arg1, tcg_target_long arg2)
|
|
391 |
{
|
|
392 |
tcg_out_ldst(s, ret, arg1, arg2, INSN_STW);
|
|
393 |
}
|
|
394 |
|
|
395 |
/* Emit an indexed (base + index register) load/store.  */
static void tcg_out_ldst_index(TCGContext *s, int data,
                               int base, int index, int op)
{
    tcg_out32(s, op | INSN_T(data) | INSN_R1(index) | INSN_R2(base));
}
|
|
400 |
|
|
401 |
static inline void tcg_out_addi2(TCGContext *s, int ret, int arg1,
|
|
402 |
tcg_target_long val)
|
|
403 |
{
|
|
404 |
tcg_out_ldst(s, ret, arg1, val, INSN_LDO);
|
|
405 |
}
|
207 |
406 |
|
208 |
|
#define STB (INSN_OP(0x18))
|
209 |
|
#define STH (INSN_OP(0x19))
|
210 |
|
#define STW (INSN_OP(0x1a))
|
211 |
|
#define STWM (INSN_OP(0x1b))
|
|
407 |
/* This function is required by tcg.c. */
|
|
408 |
static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
|
|
409 |
{
|
|
410 |
tcg_out_addi2(s, reg, reg, val);
|
|
411 |
}
|
212 |
412 |
|
213 |
|
#define COMBT (INSN_OP(0x20))
|
214 |
|
#define COMBF (INSN_OP(0x22))
|
|
413 |
/* Emit three-register arithmetic: T = R1 <op> R2.  */
static inline void tcg_out_arith(TCGContext *s, int t, int r1, int r2, int op)
{
    tcg_out32(s, op | INSN_T(t) | INSN_R1(r1) | INSN_R2(r2));
}
|
215 |
417 |
|
216 |
|
static int lowsignext(uint32_t val, int start, int length)
|
|
418 |
static inline void tcg_out_arithi(TCGContext *s, int t, int r1,
|
|
419 |
tcg_target_long val, int op)
|
217 |
420 |
{
|
218 |
|
return (((val << 1) & ~(~0 << length)) |
|
219 |
|
((val >> (length - 1)) & 1)) << start;
|
|
421 |
assert(check_fit_tl(val, 11));
|
|
422 |
tcg_out32(s, op | INSN_R1(t) | INSN_R2(r1) | INSN_IM11(val));
|
220 |
423 |
}
|
221 |
424 |
|
222 |
|
static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
|
|
425 |
/* Emit a nop, encoded as "or %r0,%r0,%r0".  */
static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out_arith(s, TCG_REG_R0, TCG_REG_R0, TCG_REG_R0, INSN_OR);
}
|
226 |
429 |
|
227 |
|
/* PA2.0 defines COPY as LDO 0(r),t
|
228 |
|
* but hppa-dis.c is unaware of this definition */
|
229 |
|
/* tcg_out32(s, LDO | INSN_R1(ret) | INSN_R2(arg) | reassemble_14(0)); */
|
|
430 |
/* Move ARG into the Shift Amount Register (control register %cr11),
   used by the variable-shift instructions.  */
static inline void tcg_out_mtctl_sar(TCGContext *s, int arg)
{
    tcg_out32(s, INSN_MTCTL | INSN_R2(11) | INSN_R1(arg));
}
|
|
434 |
|
|
435 |
/* Extract LEN bits at position OFS from ARG and place in RET.
|
|
436 |
Note that here the bit ordering is reversed from the PA-RISC
|
|
437 |
standard, such that the right-most bit is 0. */
|
|
438 |
static inline void tcg_out_extr(TCGContext *s, int ret, int arg,
|
|
439 |
unsigned ofs, unsigned len, int sign)
|
|
440 |
{
|
|
441 |
assert(ofs < 32 && len <= 32 - ofs);
|
|
442 |
tcg_out32(s, (sign ? INSN_EXTRS : INSN_EXTRU)
|
|
443 |
| INSN_R1(ret) | INSN_R2(arg)
|
|
444 |
| INSN_SHDEP_P(31 - ofs) | INSN_DEP_LEN(len));
|
230 |
445 |
}
|
231 |
446 |
|
232 |
|
static inline void tcg_out_movi(TCGContext *s, TCGType type,
|
233 |
|
int ret, tcg_target_long arg)
|
|
447 |
/* Likewise with OFS interpreted little-endian. */
|
|
448 |
static inline void tcg_out_dep(TCGContext *s, int ret, int arg,
|
|
449 |
unsigned ofs, unsigned len)
|
234 |
450 |
{
|
235 |
|
if (arg == (arg & 0x1fff)) {
|
236 |
|
tcg_out32(s, LDO | INSN_R1(ret) | INSN_R2(TCG_REG_R0) |
|
237 |
|
reassemble_14(arg));
|
|
451 |
assert(ofs < 32 && len <= 32 - ofs);
|
|
452 |
tcg_out32(s, INSN_DEP | INSN_R2(ret) | INSN_R1(arg)
|
|
453 |
| INSN_SHDEP_CP(31 - ofs) | INSN_DEP_LEN(len));
|
|
454 |
}
|
|
455 |
|
|
456 |
/* Double-word shift right: RET = low 32 bits of (HI:LO >> COUNT).  */
static inline void tcg_out_shd(TCGContext *s, int ret, int hi, int lo,
                               unsigned count)
{
    assert(count < 32);
    tcg_out32(s, INSN_SHD | INSN_R1(hi) | INSN_R2(lo) | INSN_T(ret)
              | INSN_SHDEP_CP(count));
}
|
|
463 |
|
|
464 |
/* Variable double-word shift right by the amount in CREG (loaded into
   the SAR first): RET = low 32 bits of (HI:LO >> CREG).  */
static void tcg_out_vshd(TCGContext *s, int ret, int hi, int lo, int creg)
{
    tcg_out_mtctl_sar(s, creg);
    tcg_out32(s, INSN_VSHD | INSN_T(ret) | INSN_R1(hi) | INSN_R2(lo));
}
|
|
469 |
|
|
470 |
/* RET = ARG | M.  Uses DEPI to set a contiguous run of ones when the
   mask allows it, otherwise materializes M in %r1 and ORs.  */
static void tcg_out_ori(TCGContext *s, int ret, int arg, tcg_target_ulong m)
{
    if (m == 0) {
        tcg_out_mov(s, ret, arg);
    } else if (m == -1) {
        tcg_out_movi(s, TCG_TYPE_I32, ret, -1);
    } else if (or_mask_p(m)) {
        int bs0, bs1;

        /* bs0 = lowest set bit; bs1 = one past the run of ones.  */
        for (bs0 = 0; bs0 < 32; bs0++) {
            if ((m & (1u << bs0)) != 0) {
                break;
            }
        }
        for (bs1 = bs0; bs1 < 32; bs1++) {
            if ((m & (1u << bs1)) == 0) {
                break;
            }
        }
        assert(bs1 == 32 || (1ul << bs1) > m);

        tcg_out_mov(s, ret, arg);
        tcg_out32(s, INSN_DEPI | INSN_R2(ret) | INSN_IM5(-1)
                  | INSN_SHDEP_CP(31 - bs0) | INSN_DEP_LEN(bs1 - bs0));
    } else {
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R1, m);
        tcg_out_arith(s, ret, arg, TCG_REG_R1, INSN_OR);
    }
}
|
|
499 |
|
|
500 |
/* RET = ARG & M.  Uses EXTRU (mask is low ones) or DEPI (mask clears
   one contiguous run) when possible, otherwise materializes M in %r1
   and ANDs.  */
static void tcg_out_andi(TCGContext *s, int ret, int arg, tcg_target_ulong m)
{
    if (m == 0) {
        tcg_out_mov(s, ret, TCG_REG_R0);
    } else if (m == -1) {
        tcg_out_mov(s, ret, arg);
    } else if (and_mask_p(m)) {
        int ls0, ls1, ms0;

        /* ls0 = lowest clear bit; ls1 = end of the run of zeros;
           ms0 must be 32 for the mask to be a single clear run.  */
        for (ls0 = 0; ls0 < 32; ls0++) {
            if ((m & (1u << ls0)) == 0) {
                break;
            }
        }
        for (ls1 = ls0; ls1 < 32; ls1++) {
            if ((m & (1u << ls1)) != 0) {
                break;
            }
        }
        for (ms0 = ls1; ms0 < 32; ms0++) {
            if ((m & (1u << ms0)) == 0) {
                break;
            }
        }
        assert (ms0 == 32);

        if (ls1 == 32) {
            tcg_out_extr(s, ret, arg, 0, ls0, 0);
        } else {
            tcg_out_mov(s, ret, arg);
            tcg_out32(s, INSN_DEPI | INSN_R2(ret) | INSN_IM5(0)
                      | INSN_SHDEP_CP(31 - ls0) | INSN_DEP_LEN(ls1 - ls0));
        }
    } else {
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R1, m);
        tcg_out_arith(s, ret, arg, TCG_REG_R1, INSN_AND);
    }
}
|
246 |
538 |
|
247 |
|
static inline void tcg_out_ld_raw(TCGContext *s, int ret,
|
248 |
|
tcg_target_long arg)
|
|
539 |
/* Sign-extend the low 8 bits of ARG into RET.  */
static inline void tcg_out_ext8s(TCGContext *s, int ret, int arg)
{
    tcg_out_extr(s, ret, arg, 0, 8, 1);
}
|
255 |
543 |
|
256 |
|
static inline void tcg_out_ld_ptr(TCGContext *s, int ret,
|
257 |
|
tcg_target_long arg)
|
|
544 |
/* Sign-extend the low 16 bits of ARG into RET.  */
static inline void tcg_out_ext16s(TCGContext *s, int ret, int arg)
{
    tcg_out_extr(s, ret, arg, 0, 16, 1);
}
|
261 |
548 |
|
262 |
|
static inline void tcg_out_ldst(TCGContext *s, int ret, int addr, int offset,
|
263 |
|
int op)
|
|
549 |
/* RET = ARG << COUNT (immediate), implemented as a zero-deposit.  */
static void tcg_out_shli(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out32(s, INSN_ZDEP | INSN_R2(ret) | INSN_R1(arg)
              | INSN_SHDEP_CP(31 - count) | INSN_DEP_LEN(32 - count));
}
|
273 |
555 |
|
274 |
|
static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
|
275 |
|
int arg1, tcg_target_long arg2)
|
|
556 |
/* RET = ARG << CREG (variable), via SAR = 31 - CREG and ZVDEP.  */
static void tcg_out_shl(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
    tcg_out_mtctl_sar(s, TCG_REG_R20);
    tcg_out32(s, INSN_ZVDEP | INSN_R2(ret) | INSN_R1(arg) | INSN_DEP_LEN(32));
}
|
280 |
562 |
|
281 |
|
static inline void tcg_out_st(TCGContext *s, TCGType type, int ret,
|
282 |
|
int arg1, tcg_target_long arg2)
|
|
563 |
/* RET = ARG >> COUNT (logical, immediate) via unsigned extract.  */
static void tcg_out_shri(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_extr(s, ret, arg, count, 32 - count, 0);
}
|
287 |
568 |
|
288 |
|
static inline void tcg_out_arith(TCGContext *s, int t, int r1, int r2, int op)
|
|
569 |
/* RET = ARG >> CREG (logical, variable): shift 0:ARG right.  */
static void tcg_out_shr(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_vshd(s, ret, TCG_REG_R0, arg, creg);
}
|
292 |
573 |
|
293 |
|
static inline void tcg_out_arithi(TCGContext *s, int t, int r1,
|
294 |
|
tcg_target_long val, int op)
|
|
574 |
/* RET = ARG >> COUNT (arithmetic, immediate) via signed extract.  */
static void tcg_out_sari(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_extr(s, ret, arg, count, 32 - count, 1);
}
|
299 |
579 |
|
300 |
|
static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
|
|
580 |
/* RET = ARG >> CREG (arithmetic, variable), via SAR = 31 - CREG and
   a signed variable extract.  */
static void tcg_out_sar(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
    tcg_out_mtctl_sar(s, TCG_REG_R20);
    tcg_out32(s, INSN_VEXTRS | INSN_R1(ret) | INSN_R2(arg) | INSN_DEP_LEN(32));
}
|
304 |
586 |
|
305 |
|
static inline void tcg_out_nop(TCGContext *s)
|
|
587 |
/* RET = rotate-left(ARG, COUNT): a double shift of ARG:ARG.  */
static void tcg_out_rotli(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_shd(s, ret, arg, arg, 32 - count);
}
|
310 |
592 |
|
311 |
|
static inline void tcg_out_ext8s(TCGContext *s, int ret, int arg) {
|
312 |
|
tcg_out32(s, EXTRS | INSN_R1(ret) | INSN_R2(arg) |
|
313 |
|
INSN_SHDEP_P(31) | INSN_DEP_LEN(8));
|
|
593 |
/* RET = rotate-left(ARG, CREG): rotate right by 32 - CREG.  */
static void tcg_out_rotl(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 32, INSN_SUBI);
    tcg_out_vshd(s, ret, arg, arg, TCG_REG_R20);
}
|
315 |
598 |
|
316 |
|
static inline void tcg_out_ext16s(TCGContext *s, int ret, int arg) {
|
317 |
|
tcg_out32(s, EXTRS | INSN_R1(ret) | INSN_R2(arg) |
|
318 |
|
INSN_SHDEP_P(31) | INSN_DEP_LEN(16));
|
|
599 |
/* RET = rotate-right(ARG, COUNT): a double shift of ARG:ARG.  */
static void tcg_out_rotri(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_shd(s, ret, arg, arg, count);
}
|
320 |
604 |
|
321 |
|
static inline void tcg_out_bswap16(TCGContext *s, int ret, int arg) {
|
322 |
|
if(ret != arg)
|
323 |
|
tcg_out_mov(s, ret, arg);
|
324 |
|
tcg_out32(s, DEP | INSN_R2(ret) | INSN_R1(ret) |
|
325 |
|
INSN_SHDEP_CP(15) | INSN_DEP_LEN(8));
|
326 |
|
tcg_out32(s, SHD | INSN_T(ret) | INSN_R1(TCG_REG_R0) |
|
327 |
|
INSN_R2(ret) | INSN_SHDEP_CP(8));
|
|
605 |
/* RET = rotate-right(ARG, CREG), variable count.  */
static void tcg_out_rotr(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_vshd(s, ret, arg, arg, creg);
}
|
329 |
609 |
|
330 |
|
static inline void tcg_out_bswap32(TCGContext *s, int ret, int arg, int temp) {
|
331 |
|
tcg_out32(s, SHD | INSN_T(temp) | INSN_R1(arg) |
|
332 |
|
INSN_R2(arg) | INSN_SHDEP_CP(16));
|
333 |
|
tcg_out32(s, DEP | INSN_R2(temp) | INSN_R1(temp) |
|
334 |
|
INSN_SHDEP_CP(15) | INSN_DEP_LEN(8));
|
335 |
|
tcg_out32(s, SHD | INSN_T(ret) | INSN_R1(arg) |
|
336 |
|
INSN_R2(temp) | INSN_SHDEP_CP(8));
|
|
610 |
/* Byte-swap the low 16 bits of ARG into RET, optionally
   sign-extending the result when SIGN is set.  */
static void tcg_out_bswap16(TCGContext *s, int ret, int arg, int sign)
{
    if (ret != arg) {
        tcg_out_mov(s, ret, arg);               /* arg =  xxAB */
    }
    tcg_out_dep(s, ret, ret, 16, 8);            /* ret =  xBAB */
    tcg_out_extr(s, ret, ret, 8, 16, sign);     /* ret =  ..BA */
}
|
338 |
618 |
|
339 |
|
static inline void tcg_out_call(TCGContext *s, void *func)
|
|
619 |
/* Byte-swap the 32-bit value in ARG into RET, clobbering TEMP.  */
static void tcg_out_bswap32(TCGContext *s, int ret, int arg, int temp)
{
    /* arg =  ABCD */
    tcg_out_rotri(s, temp, arg, 16);            /* temp = CDAB */
    tcg_out_dep(s, temp, temp, 16, 8);          /* temp = CBAB */
    tcg_out_shd(s, ret, arg, temp, 8);          /* ret =  DCBA */
}
|
348 |
626 |
|
349 |
|
#if defined(CONFIG_SOFTMMU)
|
|
627 |
/* Emit a call to FUNC: a direct BL when the displacement fits 17
   bits, otherwise LDIL + BLE through %r20 with the return address
   copied from %r31 to %rp.  */
static void tcg_out_call(TCGContext *s, void *func)
{
    tcg_target_long val, hi, lo, disp;

    /* Resolve the function-descriptor indirection used by hppa ELF.  */
    val = (uint32_t)__canonicalize_funcptr_for_compare(func);
    disp = (val - ((tcg_target_long)s->code_ptr + 8)) >> 2;

    if (check_fit_tl(disp, 17)) {
        tcg_out32(s, INSN_BL_N | INSN_R2(TCG_REG_RP) | reassemble_17(disp));
    } else {
        hi = val >> 11;
        lo = val & 0x7ff;

        tcg_out32(s, INSN_LDIL | INSN_R2(TCG_REG_R20) | reassemble_21(hi));
        tcg_out32(s, INSN_BLE_SR4 | INSN_R2(TCG_REG_R20)
                  | reassemble_17(lo >> 2));
        tcg_out_mov(s, TCG_REG_RP, TCG_REG_R31);
    }
}
|
350 |
646 |
|
|
647 |
/* 32x32->64 unsigned multiply of ARG1 * ARG2 via the FPU's XMPYU,
   bouncing operands and result through the stack temp slot.  Either
   RETL or RETH may be 0 (%r0) to skip that half of the result.  */
static void tcg_out_xmpyu(TCGContext *s, int retl, int reth,
                          int arg1, int arg2)
{
    /* Store both words into the stack for copy to the FPU.  */
    tcg_out_ldst(s, arg1, TCG_REG_SP, STACK_TEMP_OFS, INSN_STW);
    tcg_out_ldst(s, arg2, TCG_REG_SP, STACK_TEMP_OFS + 4, INSN_STW);

    /* Load both words into the FPU at the same time.  We get away
       with this because we can address the left and right half of the
       FPU registers individually once loaded.  */
    /* fldds stack_temp(sp),fr22 */
    tcg_out32(s, INSN_FLDDS | INSN_R2(TCG_REG_SP)
              | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));

    /* xmpyu fr22r,fr22,fr22 */
    tcg_out32(s, 0x3ad64796);

    /* Store the 64-bit result back into the stack.  */
    /* fstds stack_temp(sp),fr22 */
    tcg_out32(s, INSN_FSTDS | INSN_R2(TCG_REG_SP)
              | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));

    /* Load the pieces of the result that the caller requested.  */
    if (reth) {
        tcg_out_ldst(s, reth, TCG_REG_SP, STACK_TEMP_OFS, INSN_LDW);
    }
    if (retl) {
        tcg_out_ldst(s, retl, TCG_REG_SP, STACK_TEMP_OFS + 4, INSN_LDW);
    }
}
|
|
677 |
|
|
678 |
/* Emit an unconditional branch to LABEL_INDEX; NUL selects the
   nullified (BL,n) form.  Unresolved labels get a PCREL17F reloc.  */
static void tcg_out_branch(TCGContext *s, int label_index, int nul)
{
    TCGLabel *l = &s->labels[label_index];
    uint32_t op = nul ? INSN_BL_N : INSN_BL;

    if (l->has_value) {
        tcg_target_long val = l->u.value;

        val -= (tcg_target_long)s->code_ptr + 8;
        val >>= 2;
        assert(check_fit_tl(val, 17));

        tcg_out32(s, op | reassemble_17(val));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL17F, label_index, 0);
        tcg_out32(s, op);
    }
}
|
|
696 |
|
|
697 |
static const uint8_t tcg_cond_to_cmp_cond[10] =
|
|
698 |
{
|
|
699 |
[TCG_COND_EQ] = COND_EQ,
|
|
700 |
[TCG_COND_NE] = COND_EQ | COND_FALSE,
|
|
701 |
[TCG_COND_LT] = COND_LT,
|
|
702 |
[TCG_COND_GE] = COND_LT | COND_FALSE,
|
|
703 |
[TCG_COND_LE] = COND_LE,
|
|
704 |
[TCG_COND_GT] = COND_LE | COND_FALSE,
|
|
705 |
[TCG_COND_LTU] = COND_LTU,
|
|
706 |
[TCG_COND_GEU] = COND_LTU | COND_FALSE,
|
|
707 |
[TCG_COND_LEU] = COND_LEU,
|
|
708 |
[TCG_COND_GTU] = COND_LEU | COND_FALSE,
|
|
709 |
};
|
|
710 |
|
|
711 |
/* Emit a compare-and-branch on (C1 cond C2) to LABEL_INDEX.
   C2 may be a 5-bit immediate when C2CONST is set.  */
static void tcg_out_brcond(TCGContext *s, int cond, TCGArg c1,
                           TCGArg c2, int c2const, int label_index)
{
    TCGLabel *l = &s->labels[label_index];
    int op, pacond;

    /* Note that COMIB operates as if the immediate is the first
       operand.  We model brcond with the immediate in the second
       to better match what targets are likely to give us.  For
       consistency, model COMB with reversed operands as well.  */
    pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];

    if (c2const) {
        op = (pacond & COND_FALSE ? INSN_COMIBF : INSN_COMIBT);
        op |= INSN_IM5(c2);
    } else {
        op = (pacond & COND_FALSE ? INSN_COMBF : INSN_COMBT);
        op |= INSN_R1(c2);
    }
    op |= INSN_R2(c1);
    op |= INSN_COND(pacond & 7);

    if (l->has_value) {
        tcg_target_long val = l->u.value;

        val -= (tcg_target_long)s->code_ptr + 8;
        val >>= 2;
        assert(check_fit_tl(val, 12));

        /* ??? Assume that all branches to defined labels are backward.
           Which means that if the nul bit is set, the delay slot is
           executed if the branch is taken, and not executed in fallthru.  */
        tcg_out32(s, op | reassemble_12(val));
        tcg_out_nop(s);
    } else {
        tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL12F, label_index, 0);
        /* ??? Assume that all branches to undefined labels are forward.
           Which means that if the nul bit is set, the delay slot is
           not executed if the branch is taken, which is what we want.  */
        tcg_out32(s, op | 2);
    }
}
|
|
753 |
|
|
754 |
/* Emit COMCLR/COMICLR: compare C1 against C2 and clear RET when the
   condition holds, nullifying the following instruction otherwise.  */
static void tcg_out_comclr(TCGContext *s, int cond, TCGArg ret,
                           TCGArg c1, TCGArg c2, int c2const)
{
    int op, pacond;

    /* Note that COMICLR operates as if the immediate is the first
       operand.  We model setcond with the immediate in the second
       to better match what targets are likely to give us.  For
       consistency, model COMCLR with reversed operands as well.  */
    pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];

    if (c2const) {
        op = INSN_COMICLR | INSN_R2(c1) | INSN_R1(ret) | INSN_IM11(c2);
    } else {
        op = INSN_COMCLR | INSN_R2(c1) | INSN_R1(c2) | INSN_T(ret);
    }
    op |= INSN_COND(pacond & 7);
    op |= pacond & COND_FALSE ? 1 << 12 : 0;

    tcg_out32(s, op);
}
|
|
775 |
|
|
776 |
static void tcg_out_brcond2(TCGContext *s, int cond, TCGArg al, TCGArg ah,
|
|
777 |
TCGArg bl, int blconst, TCGArg bh, int bhconst,
|
|
778 |
int label_index)
|
|
779 |
{
|
|
780 |
switch (cond) {
|
|
781 |
case TCG_COND_EQ:
|
|
782 |
case TCG_COND_NE:
|
|
783 |
tcg_out_comclr(s, tcg_invert_cond(cond), TCG_REG_R0, al, bl, blconst);
|
|
784 |
tcg_out_brcond(s, cond, ah, bh, bhconst, label_index);
|
|
785 |
break;
|
|
786 |
|
|
787 |
default:
|
|
788 |
tcg_out_brcond(s, cond, ah, bh, bhconst, label_index);
|
|
789 |
tcg_out_comclr(s, TCG_COND_NE, TCG_REG_R0, ah, bh, bhconst);
|
|
790 |
tcg_out_brcond(s, tcg_unsigned_cond(cond),
|
|
791 |
al, bl, blconst, label_index);
|
|
792 |
break;
|
|
793 |
}
|
|
794 |
}
|
|
795 |
|
|
796 |
static void tcg_out_setcond(TCGContext *s, int cond, TCGArg ret,
|
|
797 |
TCGArg c1, TCGArg c2, int c2const)
|
|
798 |
{
|
|
799 |
tcg_out_comclr(s, tcg_invert_cond(cond), ret, c1, c2, c2const);
|
|
800 |
tcg_out_movi(s, TCG_TYPE_I32, ret, 1);
|
|
801 |
}
|
|
802 |
|
|
803 |
static void tcg_out_setcond2(TCGContext *s, int cond, TCGArg ret,
|
|
804 |
TCGArg al, TCGArg ah, TCGArg bl, int blconst,
|
|
805 |
TCGArg bh, int bhconst)
|
|
806 |
{
|
|
807 |
int scratch = TCG_REG_R20;
|
|
808 |
|
|
809 |
if (ret != al && ret != ah
|
|
810 |
&& (blconst || ret != bl)
|
|
811 |
&& (bhconst || ret != bh)) {
|
|
812 |
scratch = ret;
|
|
813 |
}
|
|
814 |
|
|
815 |
switch (cond) {
|
|
816 |
case TCG_COND_EQ:
|
|
817 |
case TCG_COND_NE:
|
|
818 |
tcg_out_setcond(s, cond, scratch, al, bl, blconst);
|
|
819 |
tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
|
|
820 |
tcg_out_movi(s, TCG_TYPE_I32, scratch, cond == TCG_COND_NE);
|
|
821 |
break;
|
|
822 |
|
|
823 |
default:
|
|
824 |
tcg_out_setcond(s, tcg_unsigned_cond(cond), scratch, al, bl, blconst);
|
|
825 |
tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
|
|
826 |
tcg_out_movi(s, TCG_TYPE_I32, scratch, 0);
|
|
827 |
tcg_out_comclr(s, cond, TCG_REG_R0, ah, bh, bhconst);
|
|
828 |
tcg_out_movi(s, TCG_TYPE_I32, scratch, 1);
|
|
829 |
break;
|
|
830 |
}
|
|
831 |
|
|
832 |
tcg_out_mov(s, ret, scratch);
|
|
833 |
}
|
|
834 |
|
|
835 |
#if defined(CONFIG_SOFTMMU)
|
351 |
836 |
#include "../../softmmu_defs.h"
|
352 |
837 |
|
353 |
838 |
static void *qemu_ld_helpers[4] = {
|
... | ... | |
363 |
848 |
__stl_mmu,
|
364 |
849 |
__stq_mmu,
|
365 |
850 |
};
|
|
851 |
|
|
852 |
/* Load and compare a TLB entry, and branch if TLB miss. OFFSET is set to
|
|
853 |
the offset of the first ADDR_READ or ADDR_WRITE member of the appropriate
|
|
854 |
TLB for the memory index. The return value is the offset from ENV
|
|
855 |
contained in R1 afterward (to be used when loading ADDEND); if the
|
|
856 |
return value is 0, R1 is not used. */
|
|
857 |
|
|
858 |
static int tcg_out_tlb_read(TCGContext *s, int r0, int r1, int addrlo,
|
|
859 |
int addrhi, int s_bits, int lab_miss, int offset)
|
|
860 |
{
|
|
861 |
int ret;
|
|
862 |
|
|
863 |
/* Extracting the index into the TLB. The "normal C operation" is
|
|
864 |
r1 = addr_reg >> TARGET_PAGE_BITS;
|
|
865 |
r1 &= CPU_TLB_SIZE - 1;
|
|
866 |
r1 <<= CPU_TLB_ENTRY_BITS;
|
|
867 |
What this does is extract CPU_TLB_BITS beginning at TARGET_PAGE_BITS
|
|
868 |
and place them at CPU_TLB_ENTRY_BITS. We can combine the first two
|
|
869 |
operations with an EXTRU. Unfortunately, the current value of
|
|
870 |
CPU_TLB_ENTRY_BITS is > 3, so we can't merge that shift with the
|
|
871 |
add that follows. */
|
|
872 |
tcg_out_extr(s, r1, addrlo, TARGET_PAGE_BITS, CPU_TLB_BITS, 0);
|
|
873 |
tcg_out_andi(s, r0, addrlo, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
|
|
874 |
tcg_out_shli(s, r1, r1, CPU_TLB_ENTRY_BITS);
|
|
875 |
tcg_out_arith(s, r1, r1, TCG_AREG0, INSN_ADDL);
|
|
876 |
|
|
877 |
/* Make sure that both the addr_{read,write} and addend can be
|
|
878 |
read with a 14-bit offset from the same base register. */
|
|
879 |
if (check_fit_tl(offset + CPU_TLB_SIZE, 14)) {
|
|
880 |
ret = 0;
|
|
881 |
} else {
|
|
882 |
ret = (offset + 0x400) & ~0x7ff;
|
|
883 |
offset = ret - offset;
|
|
884 |
tcg_out_addi2(s, TCG_REG_R1, r1, ret);
|
|
885 |
r1 = TCG_REG_R1;
|
|
886 |
}
|
|
887 |
|
|
888 |
/* Load the entry from the computed slot. */
|
|
889 |
if (TARGET_LONG_BITS == 64) {
|
|
890 |
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R23, r1, offset);
|
|
891 |
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset + 4);
|
|
892 |
} else {
|
|
893 |
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset);
|
|
894 |
}
|
|
895 |
|
|
896 |
/* If not equal, jump to lab_miss. */
|
|
897 |
if (TARGET_LONG_BITS == 64) {
|
|
898 |
tcg_out_brcond2(s, TCG_COND_NE, TCG_REG_R20, TCG_REG_R23,
|
|
899 |
r0, 0, addrhi, 0, lab_miss);
|
|
900 |
} else {
|
|
901 |
tcg_out_brcond(s, TCG_COND_NE, TCG_REG_R20, r0, 0, lab_miss);
|
|
902 |
}
|
|
903 |
|
|
904 |
return ret;
|
|
905 |
}
|
366 |
906 |
#endif
|
367 |
907 |
|
368 |
908 |
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
|
369 |
909 |
{
|
370 |
|
int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap;
|
|
910 |
int addr_reg, addr_reg2;
|
|
911 |
int data_reg, data_reg2;
|
|
912 |
int r0, r1, mem_index, s_bits, bswap;
|
|
913 |
tcg_target_long offset;
|
371 |
914 |
#if defined(CONFIG_SOFTMMU)
|
372 |
|
uint32_t *label1_ptr, *label2_ptr;
|
373 |
|
#endif
|
374 |
|
#if TARGET_LONG_BITS == 64
|
375 |
|
#if defined(CONFIG_SOFTMMU)
|
376 |
|
uint32_t *label3_ptr;
|
377 |
|
#endif
|
378 |
|
int addr_reg2;
|
|
915 |
int lab1, lab2, argreg;
|
379 |
916 |
#endif
|
380 |
917 |
|
381 |
918 |
data_reg = *args++;
|
382 |
|
if (opc == 3)
|
383 |
|
data_reg2 = *args++;
|
384 |
|
else
|
385 |
|
data_reg2 = 0; /* suppress warning */
|
|
919 |
data_reg2 = (opc == 3 ? *args++ : TCG_REG_R0);
|
386 |
920 |
addr_reg = *args++;
|
387 |
|
#if TARGET_LONG_BITS == 64
|
388 |
|
addr_reg2 = *args++;
|
389 |
|
#endif
|
|
921 |
addr_reg2 = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
|
390 |
922 |
mem_index = *args;
|
391 |
923 |
s_bits = opc & 3;
|
392 |
924 |
|
... | ... | |
394 |
926 |
r1 = TCG_REG_R25;
|
395 |
927 |
|
396 |
928 |
#if defined(CONFIG_SOFTMMU)
|
397 |
|
tcg_out_mov(s, r1, addr_reg);
|
|
929 |
lab1 = gen_new_label();
|
|
930 |
lab2 = gen_new_label();
|
398 |
931 |
|
399 |
|
tcg_out_mov(s, r0, addr_reg);
|
|
932 |
offset = tcg_out_tlb_read(s, r0, r1, addr_reg, addr_reg2, s_bits, lab1,
|
|
933 |
offsetof(CPUState,
|
|
934 |
tlb_table[mem_index][0].addr_read));
|
400 |
935 |
|
401 |
|
tcg_out32(s, SHD | INSN_T(r1) | INSN_R1(TCG_REG_R0) | INSN_R2(r1) |
|
402 |
|
INSN_SHDEP_CP(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS));
|
|
936 |
/* TLB Hit. */
|
|
937 |
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, (offset ? TCG_REG_R1 : r1),
|
|
938 |
offsetof(CPUState, tlb_table[mem_index][0].addend) - offset);
|
403 |
939 |
|
404 |
|
tcg_out_arithi(s, r0, r0, TARGET_PAGE_MASK | ((1 << s_bits) - 1),
|
405 |
|
ARITH_AND);
|
406 |
|
|
407 |
|
tcg_out_arithi(s, r1, r1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS,
|
408 |
|
ARITH_AND);
|
409 |
|
|
410 |
|
tcg_out_arith(s, r1, r1, TCG_AREG0, ARITH_ADD);
|
411 |
|
tcg_out_arithi(s, r1, r1,
|
412 |
|
offsetof(CPUState, tlb_table[mem_index][0].addr_read),
|
413 |
|
ARITH_ADD);
|
414 |
|
|
415 |
|
tcg_out_ldst(s, TCG_REG_R20, r1, 0, LDW);
|
416 |
|
|
417 |
|
#if TARGET_LONG_BITS == 32
|
418 |
|
/* if equal, jump to label1 */
|
419 |
|
label1_ptr = (uint32_t *)s->code_ptr;
|
420 |
|
tcg_out32(s, COMBT | INSN_R1(TCG_REG_R20) | INSN_R2(r0) |
|
421 |
|
INSN_COND(COND_EQUAL));
|
422 |
|
tcg_out_mov(s, r0, addr_reg); /* delay slot */
|
423 |
|
#else
|
424 |
|
/* if not equal, jump to label3 */
|
425 |
|
label3_ptr = (uint32_t *)s->code_ptr;
|
426 |
|
tcg_out32(s, COMBF | INSN_R1(TCG_REG_R20) | INSN_R2(r0) |
|
427 |
|
INSN_COND(COND_EQUAL));
|
428 |
|
tcg_out_mov(s, r0, addr_reg); /* delay slot */
|
429 |
|
|
430 |
|
tcg_out_ldst(s, TCG_REG_R20, r1, 4, LDW);
|
431 |
|
|
432 |
|
/* if equal, jump to label1 */
|
433 |
|
label1_ptr = (uint32_t *)s->code_ptr;
|
434 |
|
tcg_out32(s, COMBT | INSN_R1(TCG_REG_R20) | INSN_R2(addr_reg2) |
|
435 |
|
INSN_COND(COND_EQUAL));
|
436 |
|
tcg_out_nop(s); /* delay slot */
|
437 |
|
|
438 |
|
/* label3: */
|
439 |
|
*label3_ptr |= reassemble_12((uint32_t *)s->code_ptr - label3_ptr - 2);
|
440 |
|
#endif
|
441 |
|
|
442 |
|
#if TARGET_LONG_BITS == 32
|
443 |
|
tcg_out_mov(s, TCG_REG_R26, addr_reg);
|
444 |
|
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R25, mem_index);
|
445 |
|
#else
|
446 |
|
tcg_out_mov(s, TCG_REG_R26, addr_reg);
|
447 |
|
tcg_out_mov(s, TCG_REG_R25, addr_reg2);
|
448 |
|
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R24, mem_index);
|
449 |
|
#endif
|
450 |
|
|
451 |
|
tcg_out_call(s, qemu_ld_helpers[s_bits]);
|
452 |
|
|
453 |
|
switch(opc) {
|
454 |
|
case 0 | 4:
|
455 |
|
tcg_out_ext8s(s, data_reg, TCG_REG_RET0);
|
456 |
|
break;
|
457 |
|
case 1 | 4:
|
458 |
|
tcg_out_ext16s(s, data_reg, TCG_REG_RET0);
|
459 |
|
break;
|
460 |
|
case 0:
|
461 |
|
case 1:
|
462 |
|
case 2:
|
463 |
|
default:
|
464 |
|
tcg_out_mov(s, data_reg, TCG_REG_RET0);
|
465 |
|
break;
|
466 |
|
case 3:
|
467 |
|
tcg_abort();
|
468 |
|
tcg_out_mov(s, data_reg, TCG_REG_RET0);
|
469 |
|
tcg_out_mov(s, data_reg2, TCG_REG_RET1);
|
470 |
|
break;
|
471 |
|
}
|
472 |
|
|
473 |
|
/* jump to label2 */
|
474 |
|
label2_ptr = (uint32_t *)s->code_ptr;
|
475 |
|
tcg_out32(s, BL | INSN_R2(TCG_REG_R0) | 2);
|
476 |
|
|
477 |
|
/* label1: */
|
478 |
|
*label1_ptr |= reassemble_12((uint32_t *)s->code_ptr - label1_ptr - 2);
|
479 |
|
|
480 |
|
tcg_out_arithi(s, TCG_REG_R20, r1,
|
481 |
|
offsetof(CPUTLBEntry, addend) - offsetof(CPUTLBEntry, addr_read),
|
482 |
|
ARITH_ADD);
|
483 |
|
tcg_out_ldst(s, TCG_REG_R20, TCG_REG_R20, 0, LDW);
|
484 |
|
tcg_out_arith(s, r0, r0, TCG_REG_R20, ARITH_ADD);
|
|
940 |
tcg_out_arith(s, r0, addr_reg, TCG_REG_R20, INSN_ADDL);
|
|
941 |
offset = TCG_REG_R0;
|
485 |
942 |
#else
|
486 |
943 |
r0 = addr_reg;
|
|
944 |
offset = GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_R0;
|
487 |
945 |
#endif
|
488 |
946 |
|
489 |
947 |
#ifdef TARGET_WORDS_BIGENDIAN
|
... | ... | |
492 |
950 |
bswap = 1;
|
493 |
951 |
#endif
|
494 |
952 |
switch (opc) {
|
495 |
|
case 0:
|
496 |
|
tcg_out_ldst(s, data_reg, r0, 0, LDB);
|
497 |
|
break;
|
498 |
|
case 0 | 4:
|
499 |
|
tcg_out_ldst(s, data_reg, r0, 0, LDB);
|
500 |
|
tcg_out_ext8s(s, data_reg, data_reg);
|
501 |
|
break;
|
502 |
|
case 1:
|
503 |
|
tcg_out_ldst(s, data_reg, r0, 0, LDH);
|
504 |
|
if (bswap)
|
505 |
|
tcg_out_bswap16(s, data_reg, data_reg);
|
506 |
|
break;
|
507 |
|
case 1 | 4:
|
508 |
|
tcg_out_ldst(s, data_reg, r0, 0, LDH);
|
509 |
|
if (bswap)
|
510 |
|
tcg_out_bswap16(s, data_reg, data_reg);
|
|
953 |
case 0:
|
|
954 |
tcg_out_ldst_index(s, data_reg, r0, offset, INSN_LDBX);
|
|
955 |
break;
|
|
956 |
case 0 | 4:
|
|
957 |
tcg_out_ldst_index(s, data_reg, r0, offset, INSN_LDBX);
|
|
958 |
tcg_out_ext8s(s, data_reg, data_reg);
|
|
959 |
break;
|
|
960 |
case 1:
|
|
961 |
tcg_out_ldst_index(s, data_reg, r0, offset, INSN_LDHX);
|
|
962 |
if (bswap) {
|
|
963 |
tcg_out_bswap16(s, data_reg, data_reg, 0);
|
|
964 |
}
|
|
965 |
break;
|
|
966 |
case 1 | 4:
|
|
967 |
tcg_out_ldst_index(s, data_reg, r0, offset, INSN_LDHX);
|
|
968 |
if (bswap) {
|
|
969 |
tcg_out_bswap16(s, data_reg, data_reg, 1);
|
|
970 |
} else {
|
511 |
971 |
tcg_out_ext16s(s, data_reg, data_reg);
|
512 |
|
break;
|
513 |
|
case 2:
|
514 |
|
tcg_out_ldst(s, data_reg, r0, 0, LDW);
|
515 |
|
if (bswap)
|
516 |
|
tcg_out_bswap32(s, data_reg, data_reg, TCG_REG_R20);
|
517 |
|
break;
|
518 |
|
case 3:
|
519 |
|
tcg_abort();
|
520 |
|
if (!bswap) {
|
521 |
|
tcg_out_ldst(s, data_reg, r0, 0, LDW);
|
522 |
|
tcg_out_ldst(s, data_reg2, r0, 4, LDW);
|
|
972 |
}
|
|
973 |
break;
|
|
974 |
case 2:
|
|
975 |
tcg_out_ldst_index(s, data_reg, r0, offset, INSN_LDWX);
|
|
976 |
if (bswap) {
|
|
977 |
tcg_out_bswap32(s, data_reg, data_reg, TCG_REG_R20);
|
|
978 |
}
|
|
979 |
break;
|
|
980 |
case 3:
|
|
981 |
if (bswap) {
|
|
982 |
int t = data_reg2;
|
|
983 |
data_reg2 = data_reg;
|
|
984 |
data_reg = t;
|
|
985 |
}
|
|
986 |
if (offset == TCG_REG_R0) {
|
|
987 |
/* Make sure not to clobber the base register. */
|
|
988 |
if (data_reg2 == r0) {
|
|
989 |
tcg_out_ldst(s, data_reg, r0, 4, INSN_LDW);
|
|
990 |
tcg_out_ldst(s, data_reg2, r0, 0, INSN_LDW);
|
523 |
991 |
} else {
|
524 |
|
tcg_out_ldst(s, data_reg, r0, 4, LDW);
|
525 |
|
tcg_out_bswap32(s, data_reg, data_reg, TCG_REG_R20);
|
526 |
|
tcg_out_ldst(s, data_reg2, r0, 0, LDW);
|
527 |
|
tcg_out_bswap32(s, data_reg2, data_reg2, TCG_REG_R20);
|
|
992 |
tcg_out_ldst(s, data_reg2, r0, 0, INSN_LDW);
|
|
993 |
tcg_out_ldst(s, data_reg, r0, 4, INSN_LDW);
|
528 |
994 |
}
|
529 |
|
break;
|
530 |
|
default:
|
531 |
|
tcg_abort();
|
|
995 |
} else {
|
|
996 |
tcg_out_addi2(s, TCG_REG_R20, r0, 4);
|
|
997 |
tcg_out_ldst_index(s, data_reg2, r0, offset, INSN_LDWX);
|
|
998 |
tcg_out_ldst_index(s, data_reg, TCG_REG_R20, offset, INSN_LDWX);
|
|
999 |
}
|
|
1000 |
if (bswap) {
|
|
1001 |
tcg_out_bswap32(s, data_reg, data_reg, TCG_REG_R20);
|
|
1002 |
tcg_out_bswap32(s, data_reg2, data_reg2, TCG_REG_R20);
|
|
1003 |
}
|
|
1004 |
break;
|
|
1005 |
default:
|
|
1006 |
tcg_abort();
|
532 |
1007 |
}
|
533 |
1008 |
|
534 |
1009 |
#if defined(CONFIG_SOFTMMU)
|
|
1010 |
tcg_out_branch(s, lab2, 1);
|
|
1011 |
|
|
1012 |
/* TLB Miss. */
|
|
1013 |
/* label1: */
|
|
1014 |
tcg_out_label(s, lab1, (tcg_target_long)s->code_ptr);
|
|
1015 |
|
|
1016 |
argreg = TCG_REG_R26;
|
|
1017 |
tcg_out_mov(s, argreg--, addr_reg);
|
|
1018 |
if (TARGET_LONG_BITS == 64) {
|
|
1019 |
tcg_out_mov(s, argreg--, addr_reg2);
|
|
1020 |
}
|
|
1021 |
tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
|
|
1022 |
|
|
1023 |
tcg_out_call(s, qemu_ld_helpers[s_bits]);
|
|
1024 |
|
|
1025 |
switch (opc) {
|
|
1026 |
case 0:
|
|
1027 |
tcg_out_andi(s, data_reg, TCG_REG_RET0, 0xff);
|
|
1028 |
break;
|
|
1029 |
case 0 | 4:
|
|
1030 |
tcg_out_ext8s(s, data_reg, TCG_REG_RET0);
|
|
1031 |
break;
|
|
1032 |
case 1:
|
|
1033 |
tcg_out_andi(s, data_reg, TCG_REG_RET0, 0xffff);
|
|
1034 |
break;
|
|
1035 |
case 1 | 4:
|
|
1036 |
tcg_out_ext16s(s, data_reg, TCG_REG_RET0);
|
|
1037 |
break;
|
|
1038 |
case 2:
|
|
1039 |
case 2 | 4:
|
|
1040 |
tcg_out_mov(s, data_reg, TCG_REG_RET0);
|
|
1041 |
break;
|
|
1042 |
case 3:
|
|
1043 |
tcg_out_mov(s, data_reg, TCG_REG_RET0);
|
|
1044 |
tcg_out_mov(s, data_reg2, TCG_REG_RET1);
|
|
1045 |
break;
|
|
1046 |
default:
|
|
1047 |
tcg_abort();
|
|
1048 |
}
|
|
1049 |
|
535 |
1050 |
/* label2: */
|
536 |
|
*label2_ptr |= reassemble_17((uint32_t *)s->code_ptr - label2_ptr - 2);
|
|
1051 |
tcg_out_label(s, lab2, (tcg_target_long)s->code_ptr);
|
537 |
1052 |
#endif
|
538 |
1053 |
}
|
539 |
1054 |
|
540 |
1055 |
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
|
541 |
1056 |
{
|
542 |
|
int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap;
|
543 |
|
#if defined(CONFIG_SOFTMMU)
|
544 |
|
uint32_t *label1_ptr, *label2_ptr;
|
545 |
|
#endif
|
546 |
|
#if TARGET_LONG_BITS == 64
|
|
1057 |
int addr_reg, addr_reg2;
|
|
1058 |
int data_reg, data_reg2;
|
|
1059 |
int r0, r1, mem_index, s_bits, bswap;
|
547 |
1060 |
#if defined(CONFIG_SOFTMMU)
|
548 |
|
uint32_t *label3_ptr;
|
549 |
|
#endif
|
550 |
|
int addr_reg2;
|
|
1061 |
tcg_target_long offset;
|
|
1062 |
int lab1, lab2, argreg;
|
551 |
1063 |
#endif
|
552 |
1064 |
|
553 |
1065 |
data_reg = *args++;
|
554 |
|
if (opc == 3)
|
555 |
|
data_reg2 = *args++;
|
556 |
|
else
|
557 |
|
data_reg2 = 0; /* suppress warning */
|
|
1066 |
data_reg2 = (opc == 3 ? *args++ : 0);
|
558 |
1067 |
addr_reg = *args++;
|
559 |
|
#if TARGET_LONG_BITS == 64
|
560 |
|
addr_reg2 = *args++;
|
561 |
|
#endif
|
|
1068 |
addr_reg2 = (TARGET_LONG_BITS == 64 ? *args++ : 0);
|
562 |
1069 |
mem_index = *args;
|
563 |
|
|
564 |
1070 |
s_bits = opc;
|
565 |
1071 |
|
566 |
1072 |
r0 = TCG_REG_R26;
|
567 |
1073 |
r1 = TCG_REG_R25;
|
568 |
1074 |
|
569 |
1075 |
#if defined(CONFIG_SOFTMMU)
|
570 |
|
tcg_out_mov(s, r1, addr_reg);
|
571 |
|
|
572 |
|
tcg_out_mov(s, r0, addr_reg);
|
573 |
|
|
574 |
|
tcg_out32(s, SHD | INSN_T(r1) | INSN_R1(TCG_REG_R0) | INSN_R2(r1) |
|
575 |
|
INSN_SHDEP_CP(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS));
|
576 |
|
|
577 |
|
tcg_out_arithi(s, r0, r0, TARGET_PAGE_MASK | ((1 << s_bits) - 1),
|
578 |
|
ARITH_AND);
|
579 |
|
|
580 |
|
tcg_out_arithi(s, r1, r1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS,
|
581 |
|
ARITH_AND);
|
|
1076 |
lab1 = gen_new_label();
|
|
1077 |
lab2 = gen_new_label();
|
582 |
1078 |
|
583 |
|
tcg_out_arith(s, r1, r1, TCG_AREG0, ARITH_ADD);
|
584 |
|
tcg_out_arithi(s, r1, r1,
|
585 |
|
offsetof(CPUState, tlb_table[mem_index][0].addr_write),
|
586 |
|
ARITH_ADD);
|
|
1079 |
offset = tcg_out_tlb_read(s, r0, r1, addr_reg, addr_reg2, s_bits, lab1,
|
|
1080 |
offsetof(CPUState,
|
|
1081 |
tlb_table[mem_index][0].addr_write));
|
587 |
1082 |
|
588 |
|
tcg_out_ldst(s, TCG_REG_R20, r1, 0, LDW);
|
|
1083 |
/* TLB Hit. */
|
|
1084 |
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, (offset ? TCG_REG_R1 : r1),
|
|
1085 |
offsetof(CPUState, tlb_table[mem_index][0].addend) - offset);
|
589 |
1086 |
|
590 |
|
#if TARGET_LONG_BITS == 32
|
591 |
|
/* if equal, jump to label1 */
|
592 |
|
label1_ptr = (uint32_t *)s->code_ptr;
|
593 |
|
tcg_out32(s, COMBT | INSN_R1(TCG_REG_R20) | INSN_R2(r0) |
|
594 |
|
INSN_COND(COND_EQUAL));
|
595 |
|
tcg_out_mov(s, r0, addr_reg); /* delay slot */
|
|
1087 |
tcg_out_arith(s, r0, addr_reg, TCG_REG_R20, INSN_ADDL);
|
596 |
1088 |
#else
|
597 |
|
/* if not equal, jump to label3 */
|
598 |
|
label3_ptr = (uint32_t *)s->code_ptr;
|
599 |
|
tcg_out32(s, COMBF | INSN_R1(TCG_REG_R20) | INSN_R2(r0) |
|
600 |
|
INSN_COND(COND_EQUAL));
|
601 |
|
tcg_out_mov(s, r0, addr_reg); /* delay slot */
|
602 |
|
|
603 |
|
tcg_out_ldst(s, TCG_REG_R20, r1, 4, LDW);
|
604 |
|
|
605 |
|
/* if equal, jump to label1 */
|
606 |
|
label1_ptr = (uint32_t *)s->code_ptr;
|
607 |
|
tcg_out32(s, COMBT | INSN_R1(TCG_REG_R20) | INSN_R2(addr_reg2) |
|
608 |
|
INSN_COND(COND_EQUAL));
|
609 |
|
tcg_out_nop(s); /* delay slot */
|
610 |
|
|
611 |
|
/* label3: */
|
612 |
|
*label3_ptr |= reassemble_12((uint32_t *)s->code_ptr - label3_ptr - 2);
|
613 |
|
#endif
|
614 |
|
|
615 |
|
tcg_out_mov(s, TCG_REG_R26, addr_reg);
|
616 |
|
#if TARGET_LONG_BITS == 64
|
617 |
|
tcg_out_mov(s, TCG_REG_R25, addr_reg2);
|
618 |
|
if (opc == 3) {
|
619 |
|
tcg_abort();
|
620 |
|
tcg_out_mov(s, TCG_REG_R24, data_reg);
|
621 |
|
tcg_out_mov(s, TCG_REG_R23, data_reg2);
|
622 |
|
/* TODO: push mem_index */
|
623 |
|
tcg_abort();
|
|
1089 |
/* There are no indexed stores, so if GUEST_BASE is set
|
|
1090 |
we must do the add explicitly. Careful to avoid R20,
|
|
1091 |
which is used for the bswaps to follow. */
|
|
1092 |
if (GUEST_BASE == 0) {
|
|
1093 |
r0 = addr_reg;
|
624 |
1094 |
} else {
|
625 |
|
switch(opc) {
|
626 |
|
case 0:
|
627 |
|
tcg_out32(s, EXTRU | INSN_R1(TCG_REG_R24) | INSN_R2(data_reg) |
|
628 |
|
INSN_SHDEP_P(31) | INSN_DEP_LEN(8));
|
629 |
|
break;
|
630 |
|
case 1:
|
631 |
|
tcg_out32(s, EXTRU | INSN_R1(TCG_REG_R24) | INSN_R2(data_reg) |
|
632 |
|
INSN_SHDEP_P(31) | INSN_DEP_LEN(16));
|
633 |
|
break;
|
634 |
|
case 2:
|
635 |
|
tcg_out_mov(s, TCG_REG_R24, data_reg);
|
636 |
|
break;
|
637 |
|
}
|
638 |
|
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R23, mem_index);
|
|
1095 |
tcg_out_arith(s, TCG_REG_R31, addr_reg, TCG_GUEST_BASE_REG, INSN_ADDL);
|
|
1096 |
r0 = TCG_REG_R31;
|
639 |
1097 |
}
|
640 |
|
#else
|
641 |
|
if (opc == 3) {
|
642 |
|
tcg_abort();
|
643 |
|
tcg_out_mov(s, TCG_REG_R25, data_reg);
|
644 |
|
tcg_out_mov(s, TCG_REG_R24, data_reg2);
|
645 |
|
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R23, mem_index);
|
646 |
|
} else {
|
647 |
|
switch(opc) {
|
648 |
|
case 0:
|
649 |
|
tcg_out32(s, EXTRU | INSN_R1(TCG_REG_R25) | INSN_R2(data_reg) |
|
650 |
|
INSN_SHDEP_P(31) | INSN_DEP_LEN(8));
|
651 |
|
break;
|
652 |
|
case 1:
|
653 |
|
tcg_out32(s, EXTRU | INSN_R1(TCG_REG_R25) | INSN_R2(data_reg) |
|
654 |
|
INSN_SHDEP_P(31) | INSN_DEP_LEN(16));
|
655 |
|
break;
|
656 |
|
case 2:
|
657 |
|
tcg_out_mov(s, TCG_REG_R25, data_reg);
|
658 |
|
break;
|
659 |
|
}
|
660 |
|
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R24, mem_index);
|
661 |
|
}
|
662 |
|
#endif
|
663 |
|
tcg_out_call(s, qemu_st_helpers[s_bits]);
|
664 |
|
|
665 |
|
/* jump to label2 */
|
666 |
|
label2_ptr = (uint32_t *)s->code_ptr;
|
667 |
|
tcg_out32(s, BL | INSN_R2(TCG_REG_R0) | 2);
|
668 |
|
|
669 |
|
/* label1: */
|
670 |
|
*label1_ptr |= reassemble_12((uint32_t *)s->code_ptr - label1_ptr - 2);
|
671 |
|
|
672 |
|
tcg_out_arithi(s, TCG_REG_R20, r1,
|
673 |
|
offsetof(CPUTLBEntry, addend) - offsetof(CPUTLBEntry, addr_write),
|
674 |
|
ARITH_ADD);
|
675 |
|
tcg_out_ldst(s, TCG_REG_R20, TCG_REG_R20, 0, LDW);
|
676 |
|
tcg_out_arith(s, r0, r0, TCG_REG_R20, ARITH_ADD);
|
677 |
|
#else
|
678 |
|
r0 = addr_reg;
|
679 |
1098 |
#endif
|
680 |
1099 |
|
681 |
1100 |
#ifdef TARGET_WORDS_BIGENDIAN
|
... | ... | |
685 |
1104 |
#endif
|
686 |
1105 |
switch (opc) {
|
687 |
1106 |
case 0:
|
688 |
|
tcg_out_ldst(s, data_reg, r0, 0, STB);
|
|
1107 |
tcg_out_ldst(s, data_reg, r0, 0, INSN_STB);
|
689 |
1108 |
break;
|
690 |
1109 |
case 1:
|
691 |
1110 |
if (bswap) {
|
692 |
|
tcg_out_bswap16(s, TCG_REG_R20, data_reg);
|
|
1111 |
tcg_out_bswap16(s, TCG_REG_R20, data_reg, 0);
|
693 |
1112 |
data_reg = TCG_REG_R20;
|
694 |
1113 |
}
|
695 |
|
tcg_out_ldst(s, data_reg, r0, 0, STH);
|
|
1114 |
tcg_out_ldst(s, data_reg, r0, 0, INSN_STH);
|
696 |
1115 |
break;
|
697 |
1116 |
case 2:
|