Revision 8984bd2e
--- a/target-arm/exec.h
+++ b/target-arm/exec.h
@@ -61,13 +61,6 @@
 
 /* In op_helper.c */
 
-void helper_set_cp(CPUState *, uint32_t, uint32_t);
-uint32_t helper_get_cp(CPUState *, uint32_t);
-void helper_set_cp15(CPUState *, uint32_t, uint32_t);
-uint32_t helper_get_cp15(CPUState *, uint32_t);
-uint32_t helper_v7m_mrs(CPUState *env, int reg);
-void helper_v7m_msr(CPUState *env, int reg, uint32_t val);
-
 void helper_mark_exclusive(CPUARMState *, uint32_t addr);
 int helper_test_exclusive(CPUARMState *, uint32_t addr);
 void helper_clrex(CPUARMState *env);
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -470,38 +470,38 @@
 }
 
 /* These should probably raise undefined insn exceptions.  */
-void helper_set_cp(CPUState *env, uint32_t insn, uint32_t val)
+void HELPER(set_cp)(CPUState *env, uint32_t insn, uint32_t val)
 {
     int op1 = (insn >> 8) & 0xf;
     cpu_abort(env, "cp%i insn %08x\n", op1, insn);
     return;
 }
 
-uint32_t helper_get_cp(CPUState *env, uint32_t insn)
+uint32_t HELPER(get_cp)(CPUState *env, uint32_t insn)
 {
     int op1 = (insn >> 8) & 0xf;
     cpu_abort(env, "cp%i insn %08x\n", op1, insn);
     return 0;
 }
 
-void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val)
+void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
 {
     cpu_abort(env, "cp15 insn %08x\n", insn);
 }
 
-uint32_t helper_get_cp15(CPUState *env, uint32_t insn)
+uint32_t HELPER(get_cp15)(CPUState *env, uint32_t insn)
 {
     cpu_abort(env, "cp15 insn %08x\n", insn);
     return 0;
 }
 
 /* These should probably raise undefined insn exceptions.  */
-void helper_v7m_msr(CPUState *env, int reg, uint32_t val)
+void HELPER(v7m_msr)(CPUState *env, uint32_t reg, uint32_t val)
 {
     cpu_abort(env, "v7m_mrs %d\n", reg);
 }
 
-uint32_t helper_v7m_mrs(CPUState *env, int reg)
+uint32_t HELPER(v7m_mrs)(CPUState *env, uint32_t reg)
 {
     cpu_abort(env, "v7m_mrs %d\n", reg);
     return 0;
@@ -1191,7 +1191,7 @@
     env->mmon_addr = -1;
 }
 
-void helper_set_cp(CPUState *env, uint32_t insn, uint32_t val)
+void HELPER(set_cp)(CPUState *env, uint32_t insn, uint32_t val)
 {
     int cp_num = (insn >> 8) & 0xf;
     int cp_info = (insn >> 5) & 7;
@@ -1203,7 +1203,7 @@
                       cp_info, src, operand, val);
 }
 
-uint32_t helper_get_cp(CPUState *env, uint32_t insn)
+uint32_t HELPER(get_cp)(CPUState *env, uint32_t insn)
 {
     int cp_num = (insn >> 8) & 0xf;
     int cp_info = (insn >> 5) & 7;
@@ -1246,7 +1246,7 @@
     return ret;
 }
 
-void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val)
+void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
 {
     int op1;
     int op2;
@@ -1530,7 +1530,7 @@
               (insn >> 16) & 0xf, crm, op1, op2);
 }
 
-uint32_t helper_get_cp15(CPUState *env, uint32_t insn)
+uint32_t HELPER(get_cp15)(CPUState *env, uint32_t insn)
 {
     int op1;
     int op2;
@@ -1803,7 +1803,7 @@
     return env->banked_r13[bank_number(mode)];
 }
 
-uint32_t helper_v7m_mrs(CPUState *env, int reg)
+uint32_t HELPER(v7m_mrs)(CPUState *env, uint32_t reg)
 {
     switch (reg) {
     case 0: /* APSR */
@@ -1840,7 +1840,7 @@
     }
 }
 
-void helper_v7m_msr(CPUState *env, int reg, uint32_t val)
+void HELPER(v7m_msr)(CPUState *env, uint32_t reg, uint32_t val)
 {
     switch (reg) {
     case 0: /* APSR */
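The HELPER(x) spelling in the new definitions is a token-pasting macro: HELPER(set_cp) still defines helper_set_cp, so the prototypes generated from helpers.h and the existing callers keep linking against the same symbols. A minimal stand-alone sketch of the mechanism; the glue()/HELPER() names mirror QEMU's, but the exact definitions live in its headers and may differ in detail:

#include <stdint.h>

/* Hypothetical stand-ins for QEMU's real macros. */
#define glue2(a, b) a##b
#define glue(a, b)  glue2(a, b)
#define HELPER(name) glue(helper_, name)

typedef struct CPUState CPUState;

/* HELPER(v7m_mrs) pastes to helper_v7m_mrs: */
uint32_t HELPER(v7m_mrs)(CPUState *env, uint32_t reg);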
--- a/target-arm/helpers.h
+++ b/target-arm/helpers.h
@@ -118,6 +118,15 @@
 DEF_HELPER_0_2(cpsr_write, void, (uint32_t, uint32_t))
 DEF_HELPER_1_0(cpsr_read, uint32_t, (void))
 
+DEF_HELPER_0_3(v7m_msr, void, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_2(v7m_mrs, uint32_t, (CPUState *, uint32_t))
+
+DEF_HELPER_0_3(set_cp15, void, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_2(get_cp15, uint32_t, (CPUState *, uint32_t))
+
+DEF_HELPER_0_3(set_cp, void, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_2(get_cp, uint32_t, (CPUState *, uint32_t))
+
 DEF_HELPER_1_2(get_r13_banked, uint32_t, (CPUState *, uint32_t))
 DEF_HELPER_0_3(set_r13_banked, void, (CPUState *, uint32_t, uint32_t))
 
@@ -187,6 +196,20 @@
 DEF_HELPER_1_2(recpe_u32, uint32_t, (uint32_t, CPUState *))
 DEF_HELPER_1_2(rsqrte_u32, uint32_t, (uint32_t, CPUState *))
 
+DEF_HELPER_1_2(add_cc, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(adc_cc, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(sub_cc, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(sbc_cc, uint32_t, (uint32_t, uint32_t))
+
+DEF_HELPER_1_2(shl, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(shr, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(sar, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(ror, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(shl_cc, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(shr_cc, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(sar_cc, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(ror_cc, uint32_t, (uint32_t, uint32_t))
+
 #undef DEF_HELPER
 #undef DEF_HELPER_0_0
 #undef DEF_HELPER_0_1
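The DEF_HELPER_m_n names appear to encode each helper's shape: the first digit is the number of returned values (0 or 1), the second the argument count, which matches every entry above. Such a list is typically expanded more than once (an "X macro" pattern), and the trailing #undef lines support exactly that. A sketch under those assumptions; the macro bodies here are illustrative, not QEMU's exact ones:

#include <stdint.h>

typedef struct CPUState CPUState;

/* Pass 1 (hypothetical): expand each entry into a prototype. */
#define DEF_HELPER_0_3(name, ret, args) ret helper_##name args;
#define DEF_HELPER_1_2(name, ret, args) ret helper_##name args;

DEF_HELPER_0_3(set_cp, void, (CPUState *, uint32_t, uint32_t))
DEF_HELPER_1_2(get_cp, uint32_t, (CPUState *, uint32_t))

/* A second pass would redefine DEF_HELPER_* so the same entries emit
   gen_helper_* wrappers that queue a TCG call to the helper. */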
--- a/target-arm/op.c
+++ b/target-arm/op.c
@@ -20,66 +20,6 @@
  */
 #include "exec.h"
 
-void OPPROTO op_addl_T0_T1_cc(void)
-{
-    unsigned int src1;
-    src1 = T0;
-    T0 += T1;
-    env->NZF = T0;
-    env->CF = T0 < src1;
-    env->VF = (src1 ^ T1 ^ -1) & (src1 ^ T0);
-}
-
-void OPPROTO op_adcl_T0_T1_cc(void)
-{
-    unsigned int src1;
-    src1 = T0;
-    if (!env->CF) {
-        T0 += T1;
-        env->CF = T0 < src1;
-    } else {
-        T0 += T1 + 1;
-        env->CF = T0 <= src1;
-    }
-    env->VF = (src1 ^ T1 ^ -1) & (src1 ^ T0);
-    env->NZF = T0;
-    FORCE_RET();
-}
-
-#define OPSUB(sub, sbc, res, T0, T1) \
-\
-void OPPROTO op_ ## sub ## l_T0_T1_cc(void) \
-{ \
-    unsigned int src1; \
-    src1 = T0; \
-    T0 -= T1; \
-    env->NZF = T0; \
-    env->CF = src1 >= T1; \
-    env->VF = (src1 ^ T1) & (src1 ^ T0); \
-    res = T0; \
-} \
-\
-void OPPROTO op_ ## sbc ## l_T0_T1_cc(void) \
-{ \
-    unsigned int src1; \
-    src1 = T0; \
-    if (!env->CF) { \
-        T0 = T0 - T1 - 1; \
-        env->CF = src1 > T1; \
-    } else { \
-        T0 = T0 - T1; \
-        env->CF = src1 >= T1; \
-    } \
-    env->VF = (src1 ^ T1) & (src1 ^ T0); \
-    env->NZF = T0; \
-    res = T0; \
-    FORCE_RET(); \
-}
-
-OPSUB(sub, sbc, T0, T0, T1)
-
-OPSUB(rsb, rsc, T0, T1, T0)
-
 /* memory access */
 
 #define MEMSUFFIX _raw
@@ -92,164 +32,6 @@
 #include "op_mem.h"
 #endif
 
-void OPPROTO op_clrex(void)
-{
-    cpu_lock();
-    helper_clrex(env);
-    cpu_unlock();
-}
-
-/* T1 based, use T0 as shift count */
-
-void OPPROTO op_shll_T1_T0(void)
-{
-    int shift;
-    shift = T0 & 0xff;
-    if (shift >= 32)
-        T1 = 0;
-    else
-        T1 = T1 << shift;
-    FORCE_RET();
-}
-
-void OPPROTO op_shrl_T1_T0(void)
-{
-    int shift;
-    shift = T0 & 0xff;
-    if (shift >= 32)
-        T1 = 0;
-    else
-        T1 = (uint32_t)T1 >> shift;
-    FORCE_RET();
-}
-
-void OPPROTO op_sarl_T1_T0(void)
-{
-    int shift;
-    shift = T0 & 0xff;
-    if (shift >= 32)
-        shift = 31;
-    T1 = (int32_t)T1 >> shift;
-}
-
-void OPPROTO op_rorl_T1_T0(void)
-{
-    int shift;
-    shift = T0 & 0x1f;
-    if (shift) {
-        T1 = ((uint32_t)T1 >> shift) | (T1 << (32 - shift));
-    }
-    FORCE_RET();
-}
-
-/* T1 based, use T0 as shift count and compute CF */
-
-void OPPROTO op_shll_T1_T0_cc(void)
-{
-    int shift;
-    shift = T0 & 0xff;
-    if (shift >= 32) {
-        if (shift == 32)
-            env->CF = T1 & 1;
-        else
-            env->CF = 0;
-        T1 = 0;
-    } else if (shift != 0) {
-        env->CF = (T1 >> (32 - shift)) & 1;
-        T1 = T1 << shift;
-    }
-    FORCE_RET();
-}
-
-void OPPROTO op_shrl_T1_T0_cc(void)
-{
-    int shift;
-    shift = T0 & 0xff;
-    if (shift >= 32) {
-        if (shift == 32)
-            env->CF = (T1 >> 31) & 1;
-        else
-            env->CF = 0;
-        T1 = 0;
-    } else if (shift != 0) {
-        env->CF = (T1 >> (shift - 1)) & 1;
-        T1 = (uint32_t)T1 >> shift;
-    }
-    FORCE_RET();
-}
-
-void OPPROTO op_sarl_T1_T0_cc(void)
-{
-    int shift;
-    shift = T0 & 0xff;
-    if (shift >= 32) {
-        env->CF = (T1 >> 31) & 1;
-        T1 = (int32_t)T1 >> 31;
-    } else if (shift != 0) {
-        env->CF = (T1 >> (shift - 1)) & 1;
-        T1 = (int32_t)T1 >> shift;
-    }
-    FORCE_RET();
-}
-
-void OPPROTO op_rorl_T1_T0_cc(void)
-{
-    int shift1, shift;
-    shift1 = T0 & 0xff;
-    shift = shift1 & 0x1f;
-    if (shift == 0) {
-        if (shift1 != 0)
-            env->CF = (T1 >> 31) & 1;
-    } else {
-        env->CF = (T1 >> (shift - 1)) & 1;
-        T1 = ((uint32_t)T1 >> shift) | (T1 << (32 - shift));
-    }
-    FORCE_RET();
-}
-
-void OPPROTO op_movl_cp_T0(void)
-{
-    helper_set_cp(env, PARAM1, T0);
-    FORCE_RET();
-}
-
-void OPPROTO op_movl_T0_cp(void)
-{
-    T0 = helper_get_cp(env, PARAM1);
-    FORCE_RET();
-}
-
-void OPPROTO op_movl_cp15_T0(void)
-{
-    helper_set_cp15(env, PARAM1, T0);
-    FORCE_RET();
-}
-
-void OPPROTO op_movl_T0_cp15(void)
-{
-    T0 = helper_get_cp15(env, PARAM1);
-    FORCE_RET();
-}
-
-void OPPROTO op_v7m_mrs_T0(void)
-{
-    T0 = helper_v7m_mrs(env, PARAM1);
-}
-
-void OPPROTO op_v7m_msr_T0(void)
-{
-    helper_v7m_msr(env, PARAM1, T0);
-}
-
-void OPPROTO op_movl_T0_sp(void)
-{
-    if (PARAM1 == env->v7m.current_sp)
-        T0 = env->regs[13];
-    else
-        T0 = env->v7m.other_sp;
-    FORCE_RET();
-}
-
 #include "op_neon.h"
 
 /* iwMMXt support */
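The arithmetic the removed dyngen ops performed survives unchanged in the op_helper.c additions below; only the calling convention changes from implicit T0/T1 globals to explicit arguments. The flag encoding they share, as the surrounding code suggests: NZF holds the raw result (N is its bit 31, Z means NZF == 0), CF is a plain 0/1 carry (NOT-borrow for subtraction), and bit 31 of the VF expression is the signed-overflow flag. A stand-alone check of the two identities:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* ADD: 0x7fffffff + 1 overflows signed but produces no unsigned carry. */
    uint32_t a = 0x7fffffffu, b = 1u;
    uint32_t res = a + b;
    assert((res < a) == 0);                          /* CF: unsigned carry */
    assert((((a ^ b ^ -1) & (a ^ res)) >> 31) == 1); /* VF bit 31: overflow */

    /* SUB: 1 - 2 borrows (ARM CF is NOT-borrow) but does not overflow. */
    a = 1u; b = 2u;
    res = a - b;
    assert((a >= b) == 0);                           /* CF clear: borrow */
    assert((((a ^ b) & (a ^ res)) >> 31) == 0);      /* VF clear */
    return 0;
}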
--- a/target-arm/op_helper.c
+++ b/target-arm/op_helper.c
@@ -304,3 +304,151 @@
 }
 }
 
+/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
+   The only way to do that in TCG is a conditional branch, which clobbers
+   all our temporaries.  For now implement these as helper functions.  */
+
+uint32_t HELPER(add_cc)(uint32_t a, uint32_t b)
+{
+    uint32_t result;
+    result = a + b;
+    env->NZF = result;
+    env->CF = result < a;
+    env->VF = (a ^ b ^ -1) & (a ^ result);
+    return result;
+}
+
+uint32_t HELPER(adc_cc)(uint32_t a, uint32_t b)
+{
+    uint32_t result;
+    if (!env->CF) {
+        result = a + b;
+        env->CF = result < a;
+    } else {
+        result = a + b + 1;
+        env->CF = result <= a;
+    }
+    env->VF = (a ^ b ^ -1) & (a ^ result);
+    env->NZF = result;
+    return result;
+}
+
+uint32_t HELPER(sub_cc)(uint32_t a, uint32_t b)
+{
+    uint32_t result;
+    result = a - b;
+    env->NZF = result;
+    env->CF = a >= b;
+    env->VF = (a ^ b) & (a ^ result);
+    return result;
+}
+
+uint32_t HELPER(sbc_cc)(uint32_t a, uint32_t b)
+{
+    uint32_t result;
+    if (!env->CF) {
+        result = a - b - 1;
+        env->CF = a > b;
+    } else {
+        result = a - b;
+        env->CF = a >= b;
+    }
+    env->VF = (a ^ b) & (a ^ result);
+    env->NZF = result;
+    return result;
+}
+
+/* Similarly for variable shift instructions.  */
+
+uint32_t HELPER(shl)(uint32_t x, uint32_t i)
+{
+    int shift = i & 0xff;
+    if (shift >= 32)
+        return 0;
+    return x << shift;
+}
+
+uint32_t HELPER(shr)(uint32_t x, uint32_t i)
+{
+    int shift = i & 0xff;
+    if (shift >= 32)
+        return 0;
+    return (uint32_t)x >> shift;
+}
+
+uint32_t HELPER(sar)(uint32_t x, uint32_t i)
+{
+    int shift = i & 0xff;
+    if (shift >= 32)
+        shift = 31;
+    return (int32_t)x >> shift;
+}
+
+uint32_t HELPER(ror)(uint32_t x, uint32_t i)
+{
+    int shift = i & 0xff;
+    if (shift == 0)
+        return x;
+    return (x >> shift) | (x << (32 - shift));
+}
+
+uint32_t HELPER(shl_cc)(uint32_t x, uint32_t i)
+{
+    int shift = i & 0xff;
+    if (shift >= 32) {
+        if (shift == 32)
+            env->CF = x & 1;
+        else
+            env->CF = 0;
+        return 0;
+    } else if (shift != 0) {
+        env->CF = (x >> (32 - shift)) & 1;
+        return x << shift;
+    }
+    return x;
+}
+
+uint32_t HELPER(shr_cc)(uint32_t x, uint32_t i)
+{
+    int shift = i & 0xff;
+    if (shift >= 32) {
+        if (shift == 32)
+            env->CF = (x >> 31) & 1;
+        else
+            env->CF = 0;
+        return 0;
+    } else if (shift != 0) {
+        env->CF = (x >> (shift - 1)) & 1;
+        return x >> shift;
+    }
+    return x;
+}
+
+uint32_t HELPER(sar_cc)(uint32_t x, uint32_t i)
+{
+    int shift = i & 0xff;
+    if (shift >= 32) {
+        env->CF = (x >> 31) & 1;
+        return (int32_t)x >> 31;
+    } else if (shift != 0) {
+        env->CF = (x >> (shift - 1)) & 1;
+        return (int32_t)x >> shift;
+    }
+    return x;
+}
+
+uint32_t HELPER(ror_cc)(uint32_t x, uint32_t i)
+{
+    int shift1, shift;
+    shift1 = i & 0xff;
+    shift = shift1 & 0x1f;
+    if (shift == 0) {
+        if (shift1 != 0)
+            env->CF = (x >> 31) & 1;
+        return x;
+    } else {
+        env->CF = (x >> (shift - 1)) & 1;
+        return ((uint32_t)x >> shift) | (x << (32 - shift));
+    }
+}
+
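Note that add_cc now computes result = a + b from its explicit arguments (the page originally showed T0 + T1 here, which would silently read the old global registers instead of the helper's operands). The shift helpers keep ARM's register-shift edge cases: the shift amount is the bottom byte of the shift register, LSL/LSR by exactly 32 still set CF from bit 0 or bit 31 while producing 0, larger counts clear CF, and a zero count leaves CF untouched. A stand-alone mirror of shl_cc illustrating this; cf stands in for env->CF:

#include <stdint.h>
#include <stdio.h>

static uint32_t cf; /* stand-in for env->CF */

static uint32_t shl_cc(uint32_t x, uint32_t i)
{
    int shift = i & 0xff;            /* only the bottom byte matters */
    if (shift >= 32) {
        cf = (shift == 32) ? (x & 1) : 0;
        return 0;
    } else if (shift != 0) {
        cf = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;                        /* shift == 0: CF unchanged */
}

int main(void)
{
    printf("%08x CF=%u\n", (unsigned)shl_cc(0x80000001u, 32), (unsigned)cf); /* 00000000 CF=1 */
    printf("%08x CF=%u\n", (unsigned)shl_cc(0x80000001u, 33), (unsigned)cf); /* 00000000 CF=0 */
    printf("%08x CF=%u\n", (unsigned)shl_cc(0x80000001u, 1),  (unsigned)cf); /* 00000002 CF=1 */
    return 0;
}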
--- a/target-arm/op_mem.h
+++ b/target-arm/op_mem.h
@@ -1,24 +1,5 @@
 /* ARM memory operations.  */
 
-/* Swap T0 with memory at address T1.  */
-/* ??? Is this exception safe?  */
-#define MEM_SWP_OP(name, lname) \
-void OPPROTO glue(op_swp##name,MEMSUFFIX)(void) \
-{ \
-    uint32_t tmp; \
-    cpu_lock(); \
-    tmp = glue(ld##lname,MEMSUFFIX)(T1); \
-    glue(st##name,MEMSUFFIX)(T1, T0); \
-    T0 = tmp; \
-    cpu_unlock(); \
-    FORCE_RET(); \
-}
-
-MEM_SWP_OP(b, ub)
-MEM_SWP_OP(l, l)
-
-#undef MEM_SWP_OP
-
 /* Load-locked, store exclusive.  */
 #define EXCLUSIVE_OP(suffix, ldsuffix) \
 void OPPROTO glue(op_ld##suffix##ex,MEMSUFFIX)(void) \
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -201,6 +201,13 @@
 #define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
 #define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])
 
+#define gen_op_addl_T0_T1_cc() gen_helper_add_cc(cpu_T[0], cpu_T[0], cpu_T[1])
+#define gen_op_adcl_T0_T1_cc() gen_helper_adc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
+#define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
+#define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
+#define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])
+#define gen_op_rscl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[1], cpu_T[0])
+
 #define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
 #define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
 #define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
@@ -538,6 +545,27 @@
     }
 };
 
+static inline void gen_arm_shift_reg(TCGv var, int shiftop,
+                                     TCGv shift, int flags)
+{
+    if (flags) {
+        switch (shiftop) {
+        case 0: gen_helper_shl_cc(var, var, shift); break;
+        case 1: gen_helper_shr_cc(var, var, shift); break;
+        case 2: gen_helper_sar_cc(var, var, shift); break;
+        case 3: gen_helper_ror_cc(var, var, shift); break;
+        }
+    } else {
+        switch (shiftop) {
+        case 0: gen_helper_shl(var, var, shift); break;
+        case 1: gen_helper_shr(var, var, shift); break;
+        case 2: gen_helper_sar(var, var, shift); break;
+        case 3: gen_helper_ror(var, var, shift); break;
+        }
+    }
+    dead_tmp(shift);
+}
+
 #define PAS_OP(pfx) \
 switch (op2) { \
 case 0: gen_pas_helper(glue(pfx,add16)); break; \
@@ -746,20 +774,6 @@
     1, /* mvn */
 };
 
-static GenOpFunc *gen_shift_T1_T0[4] = {
-    gen_op_shll_T1_T0,
-    gen_op_shrl_T1_T0,
-    gen_op_sarl_T1_T0,
-    gen_op_rorl_T1_T0,
-};
-
-static GenOpFunc *gen_shift_T1_T0_cc[4] = {
-    gen_op_shll_T1_T0_cc,
-    gen_op_shrl_T1_T0_cc,
-    gen_op_sarl_T1_T0_cc,
-    gen_op_rorl_T1_T0_cc,
-};
-
 /* Set PC and Thumb state from an immediate address.  */
 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
 {
@@ -2249,6 +2263,7 @@
    instruction is not defined.  */
 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
 {
+    TCGv tmp;
     uint32_t rd = (insn >> 12) & 0xf;
     uint32_t cp = (insn >> 8) & 0xf;
     if (IS_USER(s)) {
@@ -2258,17 +2273,16 @@
     if (insn & ARM_CP_RW_BIT) {
         if (!env->cp[cp].cp_read)
             return 1;
-        gen_op_movl_T0_im((uint32_t) s->pc);
-        gen_set_pc_T0();
-        gen_op_movl_T0_cp(insn);
-        gen_movl_reg_T0(s, rd);
+        gen_set_pc_im(s->pc);
+        tmp = new_tmp();
+        gen_helper_get_cp(tmp, cpu_env, tcg_const_i32(insn));
+        store_reg(s, rd, tmp);
     } else {
         if (!env->cp[cp].cp_write)
             return 1;
-        gen_op_movl_T0_im((uint32_t) s->pc);
-        gen_set_pc_T0();
-        gen_movl_T0_reg(s, rd);
-        gen_op_movl_cp_T0(insn);
+        gen_set_pc_im(s->pc);
+        tmp = load_reg(s, rd);
+        gen_helper_set_cp(cpu_env, tcg_const_i32(insn), tmp);
     }
     return 0;
 }
@@ -2298,6 +2312,7 @@
 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
 {
     uint32_t rd;
+    TCGv tmp;
 
     /* M profile cores use memory mapped registers instead of cp15.  */
     if (arm_feature(env, ARM_FEATURE_M))
@@ -2321,20 +2336,23 @@
     if ((insn & 0x0fff0fff) == 0x0e070f90
         || (insn & 0x0fff0fff) == 0x0e070f58) {
         /* Wait for interrupt.  */
-        gen_op_movl_T0_im((long)s->pc);
-        gen_set_pc_T0();
+        gen_set_pc_im(s->pc);
         s->is_jmp = DISAS_WFI;
         return 0;
     }
     rd = (insn >> 12) & 0xf;
     if (insn & ARM_CP_RW_BIT) {
-        gen_op_movl_T0_cp15(insn);
+        tmp = new_tmp();
+        gen_helper_get_cp15(tmp, cpu_env, tcg_const_i32(insn));
         /* If the destination register is r15 then sets condition codes.  */
         if (rd != 15)
-            gen_movl_reg_T0(s, rd);
+            store_reg(s, rd, tmp);
+        else
+            dead_tmp(tmp);
     } else {
-        gen_movl_T0_reg(s, rd);
-        gen_op_movl_cp15_T0(insn);
+        tmp = load_reg(s, rd);
+        gen_helper_set_cp15(cpu_env, tcg_const_i32(insn), tmp);
+        dead_tmp(tmp);
         /* Normally we would always end the TB here, but Linux
          * arch/arm/mach-pxa/sleep.S expects two instructions following
          * an MMU enable to execute from cache.  Imitate this behaviour. */
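A discipline runs through all of these converted hunks: every value produced by new_tmp() or load_reg() is consumed exactly once, either handed off via store_reg() or released with dead_tmp(), which is why the cp15 read path above frees the temporary when rd == 15 instead of storing it. A toy model of that ownership rule; the real TCGv machinery is QEMU-internal, and this stub only counts live temporaries:

#include <assert.h>

typedef int TCGv;
static int live_tmps;

static TCGv new_tmp(void) { live_tmps++; return 0; }
static void store_reg(int rd, TCGv v) { (void)rd; (void)v; live_tmps--; }
static void dead_tmp(TCGv v) { (void)v; live_tmps--; }

int main(void)
{
    /* Mirrors the cp15 read path for rd != 15 and rd == 15. */
    for (int rd = 14; rd <= 15; rd++) {
        TCGv tmp = new_tmp();      /* target of gen_helper_get_cp15 */
        if (rd != 15)
            store_reg(rd, tmp);    /* ownership transferred */
        else
            dead_tmp(tmp);         /* r15 path must free explicitly */
    }
    assert(live_tmps == 0);        /* no leaked temporaries */
    return 0;
}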
@@ -3052,12 +3070,10 @@
     tb = s->tb;
     if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
         tcg_gen_goto_tb(n);
-        gen_op_movl_T0_im(dest);
-        gen_set_pc_T0();
+        gen_set_pc_im(dest);
         tcg_gen_exit_tb((long)tb + n);
     } else {
-        gen_op_movl_T0_im(dest);
-        gen_set_pc_T0();
+        gen_set_pc_im(dest);
         tcg_gen_exit_tb(0);
     }
 }
@@ -3173,8 +3189,7 @@
 {
     switch (val) {
     case 3: /* wfi */
-        gen_op_movl_T0_im((long)s->pc);
-        gen_set_pc_T0();
+        gen_set_pc_im(s->pc);
         s->is_jmp = DISAS_WFI;
         break;
     case 2: /* wfe */
@@ -5770,12 +5785,8 @@
             gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
         } else {
             rs = (insn >> 8) & 0xf;
-            gen_movl_T0_reg(s, rs);
-            if (logic_cc) {
-                gen_shift_T1_T0_cc[shiftop]();
-            } else {
-                gen_shift_T1_T0[shiftop]();
-            }
+            tmp = load_reg(s, rs);
+            gen_arm_shift_reg(cpu_T[1], shiftop, tmp, logic_cc);
         }
     }
     if (op1 != 0x0f && op1 != 0x0d) {
@@ -5977,14 +5988,20 @@
                 /* SWP instruction */
                 rm = (insn) & 0xf;
 
-                gen_movl_T0_reg(s, rm);
-                gen_movl_T1_reg(s, rn);
+                /* ??? This is not really atomic.  However we know
+                   we never have multiple CPUs running in parallel,
+                   so it is good enough.  */
+                addr = load_reg(s, rn);
+                tmp = load_reg(s, rm);
                 if (insn & (1 << 22)) {
-                    gen_ldst(swpb, s);
+                    tmp2 = gen_ld8u(addr, IS_USER(s));
+                    gen_st8(tmp, addr, IS_USER(s));
                 } else {
-                    gen_ldst(swpl, s);
+                    tmp2 = gen_ld32(addr, IS_USER(s));
+                    gen_st32(tmp, addr, IS_USER(s));
                 }
-                gen_movl_reg_T0(s, rd);
+                dead_tmp(addr);
+                store_reg(s, rd, tmp2);
             }
         }
     } else {
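As the added comment states, the SWP replacement is just a load/store pair and is not atomic; that is tolerable only because this emulator never runs multiple guest CPUs in parallel. Functionally the generated code computes the following, sketched as plain C:

#include <assert.h>
#include <stdint.h>

/* What the generated ld/st pair computes for a word SWP (not atomic). */
static uint32_t swp32(uint32_t *addr, uint32_t newval)
{
    uint32_t old = *addr;   /* tmp2 = gen_ld32(addr, ...) */
    *addr = newval;         /* gen_st32(tmp, addr, ...)   */
    return old;             /* store_reg(s, rd, tmp2)     */
}

int main(void)
{
    uint32_t mem = 0x11111111u;
    assert(swp32(&mem, 0x22222222u) == 0x11111111u);
    assert(mem == 0x22222222u);
    return 0;
}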
@@ -6903,18 +6920,16 @@
             goto illegal_op;
         switch (op) {
         case 0: /* Register controlled shift.  */
-            gen_movl_T0_reg(s, rm);
-            gen_movl_T1_reg(s, rn);
+            tmp = load_reg(s, rn);
+            tmp2 = load_reg(s, rm);
             if ((insn & 0x70) != 0)
                 goto illegal_op;
             op = (insn >> 21) & 3;
-            if (insn & (1 << 20)) {
-                gen_shift_T1_T0_cc[op]();
-                gen_op_logic_T1_cc();
-            } else {
-                gen_shift_T1_T0[op]();
-            }
-            gen_movl_reg_T1(s, rd);
+            logic_cc = (insn & (1 << 20)) != 0;
+            gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
+            if (logic_cc)
+                gen_logic_CC(tmp);
+            store_reg(s, rd, tmp);
             break;
         case 1: /* Sign/zero extend.  */
             tmp = load_reg(s, rm);
@@ -7208,8 +7223,9 @@
             switch (op) {
             case 0: /* msr cpsr.  */
                 if (IS_M(env)) {
-                    gen_op_v7m_msr_T0(insn & 0xff);
-                    gen_movl_reg_T0(s, rn);
+                    tmp = load_reg(s, rn);
+                    addr = tcg_const_i32(insn & 0xff);
+                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                     gen_lookup_tb(s);
                     break;
                 }
@@ -7276,12 +7292,14 @@
                 /* Unpredictable in user mode.  */
                 goto illegal_op;
             case 6: /* mrs cpsr.  */
+                tmp = new_tmp();
                 if (IS_M(env)) {
-                    gen_op_v7m_mrs_T0(insn & 0xff);
+                    addr = tcg_const_i32(insn & 0xff);
+                    gen_helper_v7m_mrs(tmp, cpu_env, addr);
                 } else {
-                    gen_helper_cpsr_read(cpu_T[0]);
+                    gen_helper_cpsr_read(tmp);
                 }
-                gen_movl_reg_T0(s, rd);
+                store_reg(s, rd, tmp);
                 break;
             case 7: /* mrs spsr.  */
                 /* Not accessible in user mode.  */
@@ -7753,25 +7771,25 @@
             break;
         case 0x2: /* lsl */
             if (s->condexec_mask) {
-                gen_op_shll_T1_T0();
+                gen_helper_shl(cpu_T[1], cpu_T[1], cpu_T[0]);
             } else {
-                gen_op_shll_T1_T0_cc();
+                gen_helper_shl_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
                 gen_op_logic_T1_cc();
             }
             break;
         case 0x3: /* lsr */
             if (s->condexec_mask) {
-                gen_op_shrl_T1_T0();
+                gen_helper_shr(cpu_T[1], cpu_T[1], cpu_T[0]);
             } else {
-                gen_op_shrl_T1_T0_cc();
+                gen_helper_shr_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
                 gen_op_logic_T1_cc();
             }
             break;
         case 0x4: /* asr */
             if (s->condexec_mask) {
-                gen_op_sarl_T1_T0();
+                gen_helper_sar(cpu_T[1], cpu_T[1], cpu_T[0]);
             } else {
-                gen_op_sarl_T1_T0_cc();
+                gen_helper_sar_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
                 gen_op_logic_T1_cc();
             }
             break;
@@ -7789,9 +7807,9 @@
             break;
         case 0x7: /* ror */
             if (s->condexec_mask) {
-                gen_op_rorl_T1_T0();
+                gen_helper_ror(cpu_T[1], cpu_T[1], cpu_T[0]);
             } else {
-                gen_op_rorl_T1_T0_cc();
+                gen_helper_ror_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
                 gen_op_logic_T1_cc();
             }
             break;
@@ -8118,15 +8136,17 @@
             if (IS_USER(s))
                 break;
             if (IS_M(env)) {
-                val = (insn & (1 << 4)) != 0;
-                gen_op_movl_T0_im(val);
+                tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                 /* PRIMASK */
-                if (insn & 1)
-                    gen_op_v7m_msr_T0(16);
+                if (insn & 1) {
+                    addr = tcg_const_i32(16);
+                    gen_helper_v7m_msr(cpu_env, addr, tmp);
+                }
                 /* FAULTMASK */
-                if (insn & 2)
-                    gen_op_v7m_msr_T0(17);
-
+                if (insn & 2) {
+                    addr = tcg_const_i32(17);
+                    gen_helper_v7m_msr(cpu_env, addr, tmp);
+                }
                 gen_lookup_tb(s);
             } else {
                 if (insn & (1 << 4))