Revision b26eefb6 target-arm/translate.c
b/target-arm/translate.c | ||
---|---|---|
29 | 29 |
#include "exec-all.h" |
30 | 30 |
#include "disas.h" |
31 | 31 |
#include "tcg-op.h" |
32 |
#include "helpers.h" |
|
32 | 33 |
|
33 | 34 |
#define ENABLE_ARCH_5J 0 |
34 | 35 |
#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6) |
... | ... | |
73 | 74 |
extern FILE *logfile; |
74 | 75 |
extern int loglevel; |
75 | 76 |
|
77 |
static TCGv cpu_env; |
|
78 |
/* FIXME: These should be removed. */ |
|
79 |
static TCGv cpu_T[3]; |
|
80 |
|
|
81 |
/* initialize TCG globals. */ |
|
82 |
/* Initialize TCG globals: bind the CPU state pointer and the three
   legacy T0-T2 temporaries to fixed host registers.  Called once at
   startup, before any code is translated.  */
void arm_translate_init(void)
{
    /* AREG0 always holds the CPUState pointer.  */
    cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");

    /* FIXME-era T0/T1/T2 dyngen temporaries, pinned to host regs 1-3.  */
    cpu_T[0] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG1, "T0");
    cpu_T[1] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG2, "T1");
    cpu_T[2] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG3, "T2");
}
|
90 |
|
|
91 |
/* The code generator doesn't like lots of temporaries, so maintain our own |
|
92 |
cache for reuse within a function. */ |
|
93 |
#define MAX_TEMPS 8 |
|
94 |
static int num_temps; |
|
95 |
static TCGv temps[MAX_TEMPS]; |
|
96 |
|
|
97 |
/* Allocate a temporary variable. */ |
|
98 |
/* Allocate a temporary variable.  Previously-created TCG temps are
   cached in temps[] and reused; a fresh temp is only created when the
   cache slot at num_temps has never been filled.  Aborts if more than
   MAX_TEMPS temps are live at once.  */
static TCGv new_tmp(void)
{
    TCGv tmp;
    if (num_temps == MAX_TEMPS)
        abort();

    /* Slot already holds a previously-created temp: reuse it.  */
    if (GET_TCGV(temps[num_temps]))
        return temps[num_temps++];

    /* First use of this slot: create a new TCG temp and cache it.  */
    tmp = tcg_temp_new(TCG_TYPE_I32);
    temps[num_temps++] = tmp;
    return tmp;
}
|
111 |
|
|
112 |
/* Release a temporary variable. */ |
|
113 |
/* Release a temporary variable allocated with new_tmp().  Temps may be
   freed in any order; the cache is kept compact by moving the freed
   temp to the first dead slot (index num_temps after the decrement),
   shifting the intervening live temps down by one.  */
static void dead_tmp(TCGv tmp)
{
    int i;
    num_temps--;
    i = num_temps;
    /* Fast path: the freed temp is already the last live one.  */
    if (GET_TCGV(temps[i]) == GET_TCGV(tmp))
        return;

    /* Shuffle this temp to the last slot.  Find it among the live
       entries, then close the gap by sliding later entries down.  */
    while (GET_TCGV(temps[i]) != GET_TCGV(tmp))
        i--;
    while (i < num_temps) {
        temps[i] = temps[i + 1];
        i++;
    }
    temps[i] = tmp;
}
|
130 |
|
|
131 |
/* Set a variable to the value of a CPU register. */ |
|
132 |
/* Set a variable to the value of a CPU register.  Reading r15 (PC)
   yields the architecturally visible value, which is the address of
   the current instruction plus one instruction's length (see below),
   so it is materialized as an immediate rather than loaded.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
    }
}
|
146 |
|
|
147 |
/* Create a new temporary and set it to the value of a CPU register. */ |
|
148 |
/* Create a new temporary and set it to the value of a CPU register.
   The caller owns the returned temp and must release it (directly via
   dead_tmp(), or indirectly e.g. through store_reg()).  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}
|
154 |
|
|
155 |
/* Set a CPU register. The source must be a temporary and will be |
|
156 |
marked as dead. */ |
|
157 |
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  A write to r15 clears bit 0 (ARM-state alignment)
   and ends the translation block, since the PC has changed.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
    dead_tmp(var);
}
|
166 |
|
|
167 |
|
|
168 |
/* Basic operations. */ |
|
169 |
#define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1]) |
|
170 |
#define gen_op_movl_T0_T2() tcg_gen_mov_i32(cpu_T[0], cpu_T[2]) |
|
171 |
#define gen_op_movl_T1_T0() tcg_gen_mov_i32(cpu_T[1], cpu_T[0]) |
|
172 |
#define gen_op_movl_T1_T2() tcg_gen_mov_i32(cpu_T[1], cpu_T[2]) |
|
173 |
#define gen_op_movl_T2_T0() tcg_gen_mov_i32(cpu_T[2], cpu_T[0]) |
|
174 |
#define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im) |
|
175 |
#define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im) |
|
176 |
#define gen_op_movl_T2_im(im) tcg_gen_movi_i32(cpu_T[2], im) |
|
177 |
|
|
178 |
#define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im) |
|
179 |
#define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1]) |
|
180 |
#define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1]) |
|
181 |
#define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0]) |
|
182 |
|
|
183 |
#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1]) |
|
184 |
#define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1]) |
|
185 |
#define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1]) |
|
186 |
#define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0]) |
|
187 |
#define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1]) |
|
188 |
#define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]); |
|
189 |
#define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]); |
|
190 |
|
|
191 |
#define gen_op_shll_T0_im(im) tcg_gen_shli_i32(cpu_T[0], cpu_T[0], im) |
|
192 |
#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im) |
|
193 |
#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im) |
|
194 |
#define gen_op_sarl_T1_im(im) tcg_gen_sari_i32(cpu_T[1], cpu_T[1], im) |
|
195 |
#define gen_op_rorl_T1_im(im) tcg_gen_rori_i32(cpu_T[1], cpu_T[1], im) |
|
196 |
|
|
197 |
/* Value extensions. */ |
|
198 |
#define gen_uxtb(var) tcg_gen_andi_i32(var, var, 0xff) |
|
199 |
#define gen_uxth(var) tcg_gen_andi_i32(var, var, 0xffff) |
|
200 |
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var) |
|
201 |
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var) |
|
202 |
|
|
203 |
#define HELPER_ADDR(x) helper_##x |
|
204 |
|
|
205 |
#define gen_sxtb16(var) tcg_gen_helper_1_1(HELPER_ADDR(sxtb16), var, var) |
|
206 |
#define gen_uxtb16(var) tcg_gen_helper_1_1(HELPER_ADDR(uxtb16), var, var) |
|
207 |
|
|
208 |
/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead. |
|
209 |
tmp = (t0 ^ t1) & 0x8000; |
|
210 |
t0 &= ~0x8000; |
|
211 |
t1 &= ~0x8000; |
|
212 |
t0 = (t0 + t1) ^ tmp; |
|
213 |
*/ |
|
214 |
|
|
215 |
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
   The halves are added independently: the top bit of each 16-bit lane
   is masked off before the 32-bit add so no carry propagates between
   lanes, then restored from the XOR of the original operands:
       tmp = (t0 ^ t1) & 0x8000;
       t0 &= ~0x8000;  t1 &= ~0x8000;
       t0 = (t0 + t1) ^ tmp;                                        */
static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    dead_tmp(tmp);
    dead_tmp(t1);
}
|
227 |
|
|
228 |
/* Set CF to the top bit of var. */ |
|
229 |
/* Set CF to the top bit of var.  var itself is not modified; the bit
   is shifted down into a scratch temp and stored to CPUState.CF.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, CF));
    dead_tmp(tmp);
}
|
236 |
|
|
237 |
/* Set N and Z flags from var. */ |
|
238 |
/* Set N and Z flags from var: the raw result is stored in
   CPUState.NZF and the flags are derived from it lazily elsewhere.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NZF));
}
|
242 |
|
|
243 |
/* T0 += T1 + CF. */ |
|
244 |
/* T0 += T1 + CF.  Add-with-carry: the plain add is followed by adding
   the saved carry flag loaded from CPUState.CF.  Condition codes are
   NOT updated here.  */
static void gen_adc_T0_T1(void)
{
    TCGv tmp = new_tmp();
    gen_op_addl_T0_T1();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUState, CF));
    tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);
    dead_tmp(tmp);
}
|
252 |
|
|
253 |
/* FIXME: Implement this natively. */ |
|
254 |
/* FIXME: Implement this natively. */
/* Bitwise NOT: dst = ~src, synthesized as XOR with all-ones.  */
static inline void tcg_gen_not_i32(TCGv dst, TCGv src)
{
    tcg_gen_xori_i32(dst, src, ~0);
}
|
258 |
|
|
259 |
/* T0 &= ~T1. Clobbers T1. */ |
|
260 |
/* FIXME: Implement bic natively. */ |
|
261 |
/* T0 &= ~T1.  Clobbers T1. */
/* FIXME: Implement bic natively. */
/* Bit-clear built from the existing NOT and AND helpers; note T1 is
   left holding its complemented value afterwards.  */
static inline void gen_op_bicl_T0_T1(void)
{
    gen_op_notl_T1();
    gen_op_andl_T0_T1();
}
|
266 |
|
|
267 |
/* FIXME: Implement this natively. */ |
|
268 |
/* FIXME: Implement this natively. */
/* Rotate t1 right by i bits into t0, composed from a right shift and
   a left shift OR'd together.  Clobbers t1.
   NOTE(review): the i == 0 early return does not copy t1 into t0, so
   this is only correct when t0 == t1 or i != 0 — presumably all
   callers satisfy that (gen_arm_shift_im only calls it with var, var
   and shift != 0); verify before reusing with distinct operands.  */
static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
{
    TCGv tmp;

    if (i == 0)
        return;

    tmp = new_tmp();
    tcg_gen_shri_i32(tmp, t1, i);
    tcg_gen_shli_i32(t1, t1, 32 - i);
    tcg_gen_or_i32(t0, t1, tmp);
    dead_tmp(tmp);
}
|
281 |
|
|
282 |
/* Shift by immediate. Includes special handling for shift == 0. */ |
|
283 |
/* Shift by immediate.  Includes special handling for shift == 0,
   where the ARM encodings mean something other than "shift by zero":
     LSR #0 encodes LSR #32 (result 0),
     ASR #0 encodes ASR #32 (result is sign bit replicated),
     ROR #0 encodes RRX (rotate right through carry by one).
   shiftop selects 0=LSL, 1=LSR, 2=ASR, 3=ROR.  Condition codes are
   not updated.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift)
{
    if (shift != 0) {
        switch (shiftop) {
        case 0: tcg_gen_shli_i32(var, var, shift); break;
        case 1: tcg_gen_shri_i32(var, var, shift); break;
        case 2: tcg_gen_sari_i32(var, var, shift); break;
        case 3: tcg_gen_rori_i32(var, var, shift); break;
        }
    } else {
        TCGv tmp;

        switch (shiftop) {
        case 0: break;                                  /* LSL #0: identity */
        case 1: tcg_gen_movi_i32(var, 0); break;        /* LSR #32 */
        case 2: tcg_gen_sari_i32(var, var, 31); break;  /* ASR #32 */
        case 3: /* rrx */
            /* var = (var >> 1) | (CF << 31) */
            tcg_gen_shri_i32(var, var, 1);
            tmp = new_tmp();
            tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUState, CF));
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
            break;
        }
    }
}
|
310 |
|
|
76 | 311 |
#define PAS_OP(pfx) { \ |
77 | 312 |
gen_op_ ## pfx ## add16_T0_T1, \ |
78 | 313 |
gen_op_ ## pfx ## addsubx_T0_T1, \ |
... | ... | |
154 | 389 |
1, /* mvn */ |
155 | 390 |
}; |
156 | 391 |
|
157 |
static GenOpFunc1 *gen_shift_T1_im[4] = { |
|
158 |
gen_op_shll_T1_im, |
|
159 |
gen_op_shrl_T1_im, |
|
160 |
gen_op_sarl_T1_im, |
|
161 |
gen_op_rorl_T1_im, |
|
162 |
}; |
|
163 |
|
|
164 |
static GenOpFunc *gen_shift_T1_0[4] = { |
|
165 |
NULL, |
|
166 |
gen_op_shrl_T1_0, |
|
167 |
gen_op_sarl_T1_0, |
|
168 |
gen_op_rrxl_T1, |
|
169 |
}; |
|
170 |
|
|
171 |
static GenOpFunc1 *gen_shift_T2_im[4] = { |
|
172 |
gen_op_shll_T2_im, |
|
173 |
gen_op_shrl_T2_im, |
|
174 |
gen_op_sarl_T2_im, |
|
175 |
gen_op_rorl_T2_im, |
|
176 |
}; |
|
177 |
|
|
178 |
static GenOpFunc *gen_shift_T2_0[4] = { |
|
179 |
NULL, |
|
180 |
gen_op_shrl_T2_0, |
|
181 |
gen_op_sarl_T2_0, |
|
182 |
gen_op_rrxl_T2, |
|
183 |
}; |
|
184 |
|
|
185 | 392 |
static GenOpFunc1 *gen_shift_T1_im_cc[4] = { |
186 | 393 |
gen_op_shll_T1_im_cc, |
187 | 394 |
gen_op_shrl_T1_im_cc, |
... | ... | |
210 | 417 |
gen_op_rorl_T1_T0_cc, |
211 | 418 |
}; |
212 | 419 |
|
213 |
static GenOpFunc *gen_op_movl_TN_reg[3][16] = { |
|
214 |
{ |
|
215 |
gen_op_movl_T0_r0, |
|
216 |
gen_op_movl_T0_r1, |
|
217 |
gen_op_movl_T0_r2, |
|
218 |
gen_op_movl_T0_r3, |
|
219 |
gen_op_movl_T0_r4, |
|
220 |
gen_op_movl_T0_r5, |
|
221 |
gen_op_movl_T0_r6, |
|
222 |
gen_op_movl_T0_r7, |
|
223 |
gen_op_movl_T0_r8, |
|
224 |
gen_op_movl_T0_r9, |
|
225 |
gen_op_movl_T0_r10, |
|
226 |
gen_op_movl_T0_r11, |
|
227 |
gen_op_movl_T0_r12, |
|
228 |
gen_op_movl_T0_r13, |
|
229 |
gen_op_movl_T0_r14, |
|
230 |
gen_op_movl_T0_r15, |
|
231 |
}, |
|
232 |
{ |
|
233 |
gen_op_movl_T1_r0, |
|
234 |
gen_op_movl_T1_r1, |
|
235 |
gen_op_movl_T1_r2, |
|
236 |
gen_op_movl_T1_r3, |
|
237 |
gen_op_movl_T1_r4, |
|
238 |
gen_op_movl_T1_r5, |
|
239 |
gen_op_movl_T1_r6, |
|
240 |
gen_op_movl_T1_r7, |
|
241 |
gen_op_movl_T1_r8, |
|
242 |
gen_op_movl_T1_r9, |
|
243 |
gen_op_movl_T1_r10, |
|
244 |
gen_op_movl_T1_r11, |
|
245 |
gen_op_movl_T1_r12, |
|
246 |
gen_op_movl_T1_r13, |
|
247 |
gen_op_movl_T1_r14, |
|
248 |
gen_op_movl_T1_r15, |
|
249 |
}, |
|
250 |
{ |
|
251 |
gen_op_movl_T2_r0, |
|
252 |
gen_op_movl_T2_r1, |
|
253 |
gen_op_movl_T2_r2, |
|
254 |
gen_op_movl_T2_r3, |
|
255 |
gen_op_movl_T2_r4, |
|
256 |
gen_op_movl_T2_r5, |
|
257 |
gen_op_movl_T2_r6, |
|
258 |
gen_op_movl_T2_r7, |
|
259 |
gen_op_movl_T2_r8, |
|
260 |
gen_op_movl_T2_r9, |
|
261 |
gen_op_movl_T2_r10, |
|
262 |
gen_op_movl_T2_r11, |
|
263 |
gen_op_movl_T2_r12, |
|
264 |
gen_op_movl_T2_r13, |
|
265 |
gen_op_movl_T2_r14, |
|
266 |
gen_op_movl_T2_r15, |
|
267 |
}, |
|
268 |
}; |
|
269 |
|
|
270 |
static GenOpFunc *gen_op_movl_reg_TN[2][16] = { |
|
271 |
{ |
|
272 |
gen_op_movl_r0_T0, |
|
273 |
gen_op_movl_r1_T0, |
|
274 |
gen_op_movl_r2_T0, |
|
275 |
gen_op_movl_r3_T0, |
|
276 |
gen_op_movl_r4_T0, |
|
277 |
gen_op_movl_r5_T0, |
|
278 |
gen_op_movl_r6_T0, |
|
279 |
gen_op_movl_r7_T0, |
|
280 |
gen_op_movl_r8_T0, |
|
281 |
gen_op_movl_r9_T0, |
|
282 |
gen_op_movl_r10_T0, |
|
283 |
gen_op_movl_r11_T0, |
|
284 |
gen_op_movl_r12_T0, |
|
285 |
gen_op_movl_r13_T0, |
|
286 |
gen_op_movl_r14_T0, |
|
287 |
gen_op_movl_r15_T0, |
|
288 |
}, |
|
289 |
{ |
|
290 |
gen_op_movl_r0_T1, |
|
291 |
gen_op_movl_r1_T1, |
|
292 |
gen_op_movl_r2_T1, |
|
293 |
gen_op_movl_r3_T1, |
|
294 |
gen_op_movl_r4_T1, |
|
295 |
gen_op_movl_r5_T1, |
|
296 |
gen_op_movl_r6_T1, |
|
297 |
gen_op_movl_r7_T1, |
|
298 |
gen_op_movl_r8_T1, |
|
299 |
gen_op_movl_r9_T1, |
|
300 |
gen_op_movl_r10_T1, |
|
301 |
gen_op_movl_r11_T1, |
|
302 |
gen_op_movl_r12_T1, |
|
303 |
gen_op_movl_r13_T1, |
|
304 |
gen_op_movl_r14_T1, |
|
305 |
gen_op_movl_r15_T1, |
|
306 |
}, |
|
307 |
}; |
|
308 |
|
|
309 |
static GenOpFunc1 *gen_op_movl_TN_im[3] = { |
|
310 |
gen_op_movl_T0_im, |
|
311 |
gen_op_movl_T1_im, |
|
312 |
gen_op_movl_T2_im, |
|
313 |
}; |
|
314 |
|
|
315 | 420 |
static GenOpFunc1 *gen_shift_T0_im_thumb_cc[3] = { |
316 | 421 |
gen_op_shll_T0_im_thumb_cc, |
317 | 422 |
gen_op_shrl_T0_im_thumb_cc, |
... | ... | |
324 | 429 |
gen_op_sarl_T0_im_thumb, |
325 | 430 |
}; |
326 | 431 |
|
432 |
/* Set PC and thumb state from T0. Clobbers T0. */ |
|
327 | 433 |
static inline void gen_bx(DisasContext *s) |
328 | 434 |
{ |
329 |
s->is_jmp = DISAS_UPDATE; |
|
330 |
gen_op_bx_T0(); |
|
331 |
} |
|
435 |
TCGv tmp; |
|
332 | 436 |
|
437 |
s->is_jmp = DISAS_UPDATE; |
|
438 |
tmp = new_tmp(); |
|
439 |
tcg_gen_andi_i32(tmp, cpu_T[0], 1); |
|
440 |
tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb)); |
|
441 |
dead_tmp(tmp); |
|
442 |
tcg_gen_andi_i32(cpu_T[0], cpu_T[0], ~1); |
|
443 |
tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, regs[15])); |
|
444 |
} |
|
333 | 445 |
|
334 | 446 |
#if defined(CONFIG_USER_ONLY) |
335 | 447 |
#define gen_ldst(name, s) gen_op_##name##_raw() |
... | ... | |
343 | 455 |
} while (0) |
344 | 456 |
#endif |
345 | 457 |
|
346 |
static inline void gen_movl_TN_reg(DisasContext *s, int reg, int t) |
|
347 |
{ |
|
348 |
int val; |
|
349 |
|
|
350 |
if (reg == 15) { |
|
351 |
/* normaly, since we updated PC, we need only to add one insn */ |
|
352 |
if (s->thumb) |
|
353 |
val = (long)s->pc + 2; |
|
354 |
else |
|
355 |
val = (long)s->pc + 4; |
|
356 |
gen_op_movl_TN_im[t](val); |
|
357 |
} else { |
|
358 |
gen_op_movl_TN_reg[t][reg](); |
|
359 |
} |
|
360 |
} |
|
361 |
|
|
362 | 458 |
static inline void gen_movl_T0_reg(DisasContext *s, int reg) |
363 | 459 |
{ |
364 |
gen_movl_TN_reg(s, reg, 0);
|
|
460 |
load_reg_var(s, cpu_T[0], reg);
|
|
365 | 461 |
} |
366 | 462 |
|
367 | 463 |
static inline void gen_movl_T1_reg(DisasContext *s, int reg) |
368 | 464 |
{ |
369 |
gen_movl_TN_reg(s, reg, 1);
|
|
465 |
load_reg_var(s, cpu_T[1], reg);
|
|
370 | 466 |
} |
371 | 467 |
|
372 | 468 |
static inline void gen_movl_T2_reg(DisasContext *s, int reg) |
373 | 469 |
{ |
374 |
gen_movl_TN_reg(s, reg, 2); |
|
470 |
load_reg_var(s, cpu_T[2], reg); |
|
471 |
} |
|
472 |
|
|
473 |
/* Write T0 into the architectural PC (r15).  Unlike store_reg(), this
   does not clear bit 0 and does not end the translation block; the
   caller is responsible for setting s->is_jmp if needed.  */
static inline void gen_set_pc_T0(void)
{
    tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, regs[15]));
}
376 | 477 |
|
377 | 478 |
static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t) |
378 | 479 |
{ |
379 |
gen_op_movl_reg_TN[t][reg](); |
|
480 |
TCGv tmp; |
|
481 |
if (reg == 15) { |
|
482 |
tmp = new_tmp(); |
|
483 |
tcg_gen_andi_i32(tmp, cpu_T[t], ~1); |
|
484 |
} else { |
|
485 |
tmp = cpu_T[t]; |
|
486 |
} |
|
487 |
tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[reg])); |
|
380 | 488 |
if (reg == 15) { |
489 |
dead_tmp(tmp); |
|
381 | 490 |
s->is_jmp = DISAS_JUMP; |
382 | 491 |
} |
383 | 492 |
} |
... | ... | |
403 | 512 |
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn) |
404 | 513 |
{ |
405 | 514 |
int val, rm, shift, shiftop; |
515 |
TCGv offset; |
|
406 | 516 |
|
407 | 517 |
if (!(insn & (1 << 25))) { |
408 | 518 |
/* immediate */ |
... | ... | |
415 | 525 |
/* shift/register */ |
416 | 526 |
rm = (insn) & 0xf; |
417 | 527 |
shift = (insn >> 7) & 0x1f; |
418 |
gen_movl_T2_reg(s, rm); |
|
419 | 528 |
shiftop = (insn >> 5) & 3; |
420 |
if (shift != 0) { |
|
421 |
gen_shift_T2_im[shiftop](shift); |
|
422 |
} else if (shiftop != 0) { |
|
423 |
gen_shift_T2_0[shiftop](); |
|
424 |
} |
|
529 |
offset = load_reg(s, rm); |
|
530 |
gen_arm_shift_im(offset, shiftop, shift); |
|
425 | 531 |
if (!(insn & (1 << 23))) |
426 |
gen_op_subl_T1_T2();
|
|
532 |
tcg_gen_sub_i32(cpu_T[1], cpu_T[1], offset);
|
|
427 | 533 |
else |
428 |
gen_op_addl_T1_T2(); |
|
534 |
tcg_gen_add_i32(cpu_T[1], cpu_T[1], offset); |
|
535 |
dead_tmp(offset); |
|
429 | 536 |
} |
430 | 537 |
} |
431 | 538 |
|
... | ... | |
433 | 540 |
int extra) |
434 | 541 |
{ |
435 | 542 |
int val, rm; |
543 |
TCGv offset; |
|
436 | 544 |
|
437 | 545 |
if (insn & (1 << 22)) { |
438 | 546 |
/* immediate */ |
... | ... | |
447 | 555 |
if (extra) |
448 | 556 |
gen_op_addl_T1_im(extra); |
449 | 557 |
rm = (insn) & 0xf; |
450 |
gen_movl_T2_reg(s, rm);
|
|
558 |
offset = load_reg(s, rm);
|
|
451 | 559 |
if (!(insn & (1 << 23))) |
452 |
gen_op_subl_T1_T2();
|
|
560 |
tcg_gen_sub_i32(cpu_T[1], cpu_T[1], offset);
|
|
453 | 561 |
else |
454 |
gen_op_addl_T1_T2(); |
|
562 |
tcg_gen_add_i32(cpu_T[1], cpu_T[1], offset); |
|
563 |
dead_tmp(offset); |
|
455 | 564 |
} |
456 | 565 |
} |
457 | 566 |
|
... | ... | |
979 | 1088 |
case 3: |
980 | 1089 |
return 1; |
981 | 1090 |
} |
982 |
gen_op_movl_reg_TN[0][rd]();
|
|
1091 |
gen_movl_reg_T0(s, rd);
|
|
983 | 1092 |
break; |
984 | 1093 |
case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */ |
985 | 1094 |
if ((insn & 0x000ff008) != 0x0003f000) |
... | ... | |
1531 | 1640 |
gen_op_iwmmxt_movq_M0_wRn(wrd); |
1532 | 1641 |
switch ((insn >> 16) & 0xf) { |
1533 | 1642 |
case 0x0: /* TMIA */ |
1534 |
gen_op_movl_TN_reg[0][rd0]();
|
|
1535 |
gen_op_movl_TN_reg[1][rd1]();
|
|
1643 |
gen_movl_T0_reg(s, rd0);
|
|
1644 |
gen_movl_T1_reg(s, rd1);
|
|
1536 | 1645 |
gen_op_iwmmxt_muladdsl_M0_T0_T1(); |
1537 | 1646 |
break; |
1538 | 1647 |
case 0x8: /* TMIAPH */ |
1539 |
gen_op_movl_TN_reg[0][rd0]();
|
|
1540 |
gen_op_movl_TN_reg[1][rd1]();
|
|
1648 |
gen_movl_T0_reg(s, rd0);
|
|
1649 |
gen_movl_T1_reg(s, rd1);
|
|
1541 | 1650 |
gen_op_iwmmxt_muladdsw_M0_T0_T1(); |
1542 | 1651 |
break; |
1543 | 1652 |
case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */ |
1544 |
gen_op_movl_TN_reg[1][rd0]();
|
|
1653 |
gen_movl_T1_reg(s, rd0);
|
|
1545 | 1654 |
if (insn & (1 << 16)) |
1546 | 1655 |
gen_op_shrl_T1_im(16); |
1547 | 1656 |
gen_op_movl_T0_T1(); |
1548 |
gen_op_movl_TN_reg[1][rd1]();
|
|
1657 |
gen_movl_T1_reg(s, rd1);
|
|
1549 | 1658 |
if (insn & (1 << 17)) |
1550 | 1659 |
gen_op_shrl_T1_im(16); |
1551 | 1660 |
gen_op_iwmmxt_muladdswl_M0_T0_T1(); |
... | ... | |
1580 | 1689 |
|
1581 | 1690 |
switch ((insn >> 16) & 0xf) { |
1582 | 1691 |
case 0x0: /* MIA */ |
1583 |
gen_op_movl_TN_reg[0][rd0]();
|
|
1584 |
gen_op_movl_TN_reg[1][rd1]();
|
|
1692 |
gen_movl_T0_reg(s, rd0);
|
|
1693 |
gen_movl_T1_reg(s, rd1);
|
|
1585 | 1694 |
gen_op_iwmmxt_muladdsl_M0_T0_T1(); |
1586 | 1695 |
break; |
1587 | 1696 |
case 0x8: /* MIAPH */ |
1588 |
gen_op_movl_TN_reg[0][rd0]();
|
|
1589 |
gen_op_movl_TN_reg[1][rd1]();
|
|
1697 |
gen_movl_T0_reg(s, rd0);
|
|
1698 |
gen_movl_T1_reg(s, rd1);
|
|
1590 | 1699 |
gen_op_iwmmxt_muladdsw_M0_T0_T1(); |
1591 | 1700 |
break; |
1592 | 1701 |
case 0xc: /* MIABB */ |
1593 | 1702 |
case 0xd: /* MIABT */ |
1594 | 1703 |
case 0xe: /* MIATB */ |
1595 | 1704 |
case 0xf: /* MIATT */ |
1596 |
gen_op_movl_TN_reg[1][rd0]();
|
|
1705 |
gen_movl_T1_reg(s, rd0);
|
|
1597 | 1706 |
if (insn & (1 << 16)) |
1598 | 1707 |
gen_op_shrl_T1_im(16); |
1599 | 1708 |
gen_op_movl_T0_T1(); |
1600 |
gen_op_movl_TN_reg[1][rd1]();
|
|
1709 |
gen_movl_T1_reg(s, rd1);
|
|
1601 | 1710 |
if (insn & (1 << 17)) |
1602 | 1711 |
gen_op_shrl_T1_im(16); |
1603 | 1712 |
gen_op_iwmmxt_muladdswl_M0_T0_T1(); |
... | ... | |
1621 | 1730 |
|
1622 | 1731 |
if (insn & ARM_CP_RW_BIT) { /* MRA */ |
1623 | 1732 |
gen_op_iwmmxt_movl_T0_T1_wRn(acc); |
1624 |
gen_op_movl_reg_TN[0][rdlo]();
|
|
1733 |
gen_movl_reg_T0(s, rdlo);
|
|
1625 | 1734 |
gen_op_movl_T0_im((1 << (40 - 32)) - 1); |
1626 | 1735 |
gen_op_andl_T0_T1(); |
1627 |
gen_op_movl_reg_TN[0][rdhi]();
|
|
1736 |
gen_movl_reg_T0(s, rdhi);
|
|
1628 | 1737 |
} else { /* MAR */ |
1629 |
gen_op_movl_TN_reg[0][rdlo]();
|
|
1630 |
gen_op_movl_TN_reg[1][rdhi]();
|
|
1738 |
gen_movl_T0_reg(s, rdlo);
|
|
1739 |
gen_movl_T1_reg(s, rdhi);
|
|
1631 | 1740 |
gen_op_iwmmxt_movl_wRn_T0_T1(acc); |
1632 | 1741 |
} |
1633 | 1742 |
return 0; |
... | ... | |
1650 | 1759 |
if (!env->cp[cp].cp_read) |
1651 | 1760 |
return 1; |
1652 | 1761 |
gen_op_movl_T0_im((uint32_t) s->pc); |
1653 |
gen_op_movl_reg_TN[0][15]();
|
|
1762 |
gen_set_pc_T0();
|
|
1654 | 1763 |
gen_op_movl_T0_cp(insn); |
1655 | 1764 |
gen_movl_reg_T0(s, rd); |
1656 | 1765 |
} else { |
1657 | 1766 |
if (!env->cp[cp].cp_write) |
1658 | 1767 |
return 1; |
1659 | 1768 |
gen_op_movl_T0_im((uint32_t) s->pc); |
1660 |
gen_op_movl_reg_TN[0][15]();
|
|
1769 |
gen_set_pc_T0();
|
|
1661 | 1770 |
gen_movl_T0_reg(s, rd); |
1662 | 1771 |
gen_op_movl_cp_T0(insn); |
1663 | 1772 |
} |
... | ... | |
1713 | 1822 |
|| (insn & 0x0fff0fff) == 0x0e070f58) { |
1714 | 1823 |
/* Wait for interrupt. */ |
1715 | 1824 |
gen_op_movl_T0_im((long)s->pc); |
1716 |
gen_op_movl_reg_TN[0][15]();
|
|
1825 |
gen_set_pc_T0();
|
|
1717 | 1826 |
s->is_jmp = DISAS_WFI; |
1718 | 1827 |
return 0; |
1719 | 1828 |
} |
... | ... | |
1817 | 1926 |
if (offset) |
1818 | 1927 |
gen_op_shrl_T1_im(offset); |
1819 | 1928 |
if (insn & (1 << 23)) |
1820 |
gen_op_uxtb_T1();
|
|
1929 |
gen_uxtb(cpu_T[1]);
|
|
1821 | 1930 |
else |
1822 |
gen_op_sxtb_T1();
|
|
1931 |
gen_sxtb(cpu_T[1]);
|
|
1823 | 1932 |
break; |
1824 | 1933 |
case 1: |
1825 | 1934 |
NEON_GET_REG(T1, rn, pass); |
... | ... | |
1827 | 1936 |
if (offset) { |
1828 | 1937 |
gen_op_shrl_T1_im(16); |
1829 | 1938 |
} else { |
1830 |
gen_op_uxth_T1();
|
|
1939 |
gen_uxth(cpu_T[1]);
|
|
1831 | 1940 |
} |
1832 | 1941 |
} else { |
1833 | 1942 |
if (offset) { |
1834 | 1943 |
gen_op_sarl_T1_im(16); |
1835 | 1944 |
} else { |
1836 |
gen_op_sxth_T1();
|
|
1945 |
gen_sxth(cpu_T[1]);
|
|
1837 | 1946 |
} |
1838 | 1947 |
} |
1839 | 1948 |
break; |
... | ... | |
2418 | 2527 |
if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) { |
2419 | 2528 |
tcg_gen_goto_tb(n); |
2420 | 2529 |
gen_op_movl_T0_im(dest); |
2421 |
gen_op_movl_r15_T0();
|
|
2530 |
gen_set_pc_T0();
|
|
2422 | 2531 |
tcg_gen_exit_tb((long)tb + n); |
2423 | 2532 |
} else { |
2424 | 2533 |
gen_op_movl_T0_im(dest); |
2425 |
gen_op_movl_r15_T0();
|
|
2534 |
gen_set_pc_T0();
|
|
2426 | 2535 |
tcg_gen_exit_tb(0); |
2427 | 2536 |
} |
2428 | 2537 |
} |
... | ... | |
2444 | 2553 |
static inline void gen_mulxy(int x, int y) |
2445 | 2554 |
{ |
2446 | 2555 |
if (x) |
2447 |
gen_op_sarl_T0_im(16);
|
|
2556 |
tcg_gen_sari_i32(cpu_T[0], cpu_T[0], 16);
|
|
2448 | 2557 |
else |
2449 |
gen_op_sxth_T0();
|
|
2558 |
gen_sxth(cpu_T[0]);
|
|
2450 | 2559 |
if (y) |
2451 | 2560 |
gen_op_sarl_T1_im(16); |
2452 | 2561 |
else |
2453 |
gen_op_sxth_T1();
|
|
2562 |
gen_sxth(cpu_T[1]);
|
|
2454 | 2563 |
gen_op_mul_T0_T1(); |
2455 | 2564 |
} |
2456 | 2565 |
|
... | ... | |
2501 | 2610 |
/* Generate an old-style exception return. */ |
2502 | 2611 |
static void gen_exception_return(DisasContext *s) |
2503 | 2612 |
{ |
2504 |
gen_op_movl_reg_TN[0][15]();
|
|
2613 |
gen_set_pc_T0();
|
|
2505 | 2614 |
gen_op_movl_T0_spsr(); |
2506 | 2615 |
gen_op_movl_cpsr_T0(0xffffffff); |
2507 | 2616 |
s->is_jmp = DISAS_UPDATE; |
... | ... | |
2512 | 2621 |
{ |
2513 | 2622 |
gen_op_movl_cpsr_T0(0xffffffff); |
2514 | 2623 |
gen_op_movl_T0_T2(); |
2515 |
gen_op_movl_reg_TN[0][15]();
|
|
2624 |
gen_set_pc_T0();
|
|
2516 | 2625 |
s->is_jmp = DISAS_UPDATE; |
2517 | 2626 |
} |
2518 | 2627 |
|
... | ... | |
2529 | 2638 |
switch (val) { |
2530 | 2639 |
case 3: /* wfi */ |
2531 | 2640 |
gen_op_movl_T0_im((long)s->pc); |
2532 |
gen_op_movl_reg_TN[0][15]();
|
|
2641 |
gen_set_pc_T0();
|
|
2533 | 2642 |
s->is_jmp = DISAS_WFI; |
2534 | 2643 |
break; |
2535 | 2644 |
case 2: /* wfe */ |
... | ... | |
3011 | 3120 |
} |
3012 | 3121 |
} |
3013 | 3122 |
if (rm != 15) { |
3014 |
gen_movl_T1_reg(s, rn); |
|
3123 |
TCGv base; |
|
3124 |
|
|
3125 |
base = load_reg(s, rn); |
|
3015 | 3126 |
if (rm == 13) { |
3016 |
gen_op_addl_T1_im(stride);
|
|
3127 |
tcg_gen_addi_i32(base, base, stride);
|
|
3017 | 3128 |
} else { |
3018 |
gen_movl_T2_reg(s, rm); |
|
3019 |
gen_op_addl_T1_T2(); |
|
3129 |
TCGv index; |
|
3130 |
index = load_reg(s, rm); |
|
3131 |
tcg_gen_add_i32(base, base, index); |
|
3132 |
dead_tmp(index); |
|
3020 | 3133 |
} |
3021 |
gen_movl_reg_T1(s, rn);
|
|
3134 |
store_reg(s, rn, base);
|
|
3022 | 3135 |
} |
3023 | 3136 |
return 0; |
3024 | 3137 |
} |
... | ... | |
4626 | 4739 |
static void disas_arm_insn(CPUState * env, DisasContext *s) |
4627 | 4740 |
{ |
4628 | 4741 |
unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh; |
4742 |
TCGv tmp; |
|
4629 | 4743 |
|
4630 | 4744 |
insn = ldl_code(s->pc); |
4631 | 4745 |
s->pc += 4; |
... | ... | |
4936 | 5050 |
case 7: /* bkpt */ |
4937 | 5051 |
gen_set_condexec(s); |
4938 | 5052 |
gen_op_movl_T0_im((long)s->pc - 4); |
4939 |
gen_op_movl_reg_TN[0][15]();
|
|
5053 |
gen_set_pc_T0();
|
|
4940 | 5054 |
gen_op_bkpt(); |
4941 | 5055 |
s->is_jmp = DISAS_JUMP; |
4942 | 5056 |
break; |
... | ... | |
4954 | 5068 |
if (sh & 4) |
4955 | 5069 |
gen_op_sarl_T1_im(16); |
4956 | 5070 |
else |
4957 |
gen_op_sxth_T1();
|
|
5071 |
gen_sxth(cpu_T[1]);
|
|
4958 | 5072 |
gen_op_imulw_T0_T1(); |
4959 | 5073 |
if ((sh & 2) == 0) { |
4960 | 5074 |
gen_movl_T1_reg(s, rn); |
... | ... | |
5001 | 5115 |
val = (val >> shift) | (val << (32 - shift)); |
5002 | 5116 |
gen_op_movl_T1_im(val); |
5003 | 5117 |
if (logic_cc && shift) |
5004 |
gen_op_mov_CF_T1();
|
|
5118 |
gen_set_CF_bit31(cpu_T[1]);
|
|
5005 | 5119 |
} else { |
5006 | 5120 |
/* register */ |
5007 | 5121 |
rm = (insn) & 0xf; |
... | ... | |
5009 | 5123 |
shiftop = (insn >> 5) & 3; |
5010 | 5124 |
if (!(insn & (1 << 4))) { |
5011 | 5125 |
shift = (insn >> 7) & 0x1f; |
5012 |
if (shift != 0) {
|
|
5013 |
if (logic_cc) {
|
|
5126 |
if (logic_cc) {
|
|
5127 |
if (shift != 0) {
|
|
5014 | 5128 |
gen_shift_T1_im_cc[shiftop](shift); |
5015 |
} else { |
|
5016 |
gen_shift_T1_im[shiftop](shift); |
|
5017 |
} |
|
5018 |
} else if (shiftop != 0) { |
|
5019 |
if (logic_cc) { |
|
5129 |
} else if (shiftop != 0) { |
|
5020 | 5130 |
gen_shift_T1_0_cc[shiftop](); |
5021 |
} else { |
|
5022 |
gen_shift_T1_0[shiftop](); |
|
5023 | 5131 |
} |
5132 |
} else { |
|
5133 |
gen_arm_shift_im(cpu_T[1], shiftop, shift); |
|
5024 | 5134 |
} |
5025 | 5135 |
} else { |
5026 | 5136 |
rs = (insn >> 8) & 0xf; |
... | ... | |
5083 | 5193 |
if (set_cc) |
5084 | 5194 |
gen_op_adcl_T0_T1_cc(); |
5085 | 5195 |
else |
5086 |
gen_op_adcl_T0_T1();
|
|
5196 |
gen_adc_T0_T1();
|
|
5087 | 5197 |
gen_movl_reg_T0(s, rd); |
5088 | 5198 |
break; |
5089 | 5199 |
case 0x06: |
... | ... | |
5389 | 5499 |
gen_op_rorl_T1_im(shift * 8); |
5390 | 5500 |
op1 = (insn >> 20) & 7; |
5391 | 5501 |
switch (op1) { |
5392 |
case 0: gen_op_sxtb16_T1(); break;
|
|
5393 |
case 2: gen_op_sxtb_T1(); break;
|
|
5394 |
case 3: gen_op_sxth_T1(); break;
|
|
5395 |
case 4: gen_op_uxtb16_T1(); break;
|
|
5396 |
case 6: gen_op_uxtb_T1(); break;
|
|
5397 |
case 7: gen_op_uxth_T1(); break;
|
|
5502 |
case 0: gen_sxtb16(cpu_T[1]); break;
|
|
5503 |
case 2: gen_sxtb(cpu_T[1]); break;
|
|
5504 |
case 3: gen_sxth(cpu_T[1]); break;
|
|
5505 |
case 4: gen_uxtb16(cpu_T[1]); break;
|
|
5506 |
case 6: gen_uxtb(cpu_T[1]); break;
|
|
5507 |
case 7: gen_uxth(cpu_T[1]); break;
|
|
5398 | 5508 |
default: goto illegal_op; |
5399 | 5509 |
} |
5400 | 5510 |
if (rn != 15) { |
5401 |
gen_movl_T2_reg(s, rn);
|
|
5511 |
tmp = load_reg(s, rn);
|
|
5402 | 5512 |
if ((op1 & 3) == 0) { |
5403 |
gen_op_add16_T1_T2();
|
|
5513 |
gen_add16(cpu_T[1], tmp);
|
|
5404 | 5514 |
} else { |
5405 |
gen_op_addl_T1_T2(); |
|
5515 |
tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp); |
|
5516 |
dead_tmp(tmp); |
|
5406 | 5517 |
} |
5407 | 5518 |
} |
5408 | 5519 |
gen_movl_reg_T1(s, rd); |
... | ... | |
5667 | 5778 |
if (i == 15) { |
5668 | 5779 |
/* special case: r15 = PC + 8 */ |
5669 | 5780 |
val = (long)s->pc + 4; |
5670 |
gen_op_movl_TN_im[0](val);
|
|
5781 |
gen_op_movl_T0_im(val);
|
|
5671 | 5782 |
} else if (user) { |
5672 | 5783 |
gen_op_movl_T0_user(i); |
5673 | 5784 |
} else { |
... | ... | |
5723 | 5834 |
val = (int32_t)s->pc; |
5724 | 5835 |
if (insn & (1 << 24)) { |
5725 | 5836 |
gen_op_movl_T0_im(val); |
5726 |
gen_op_movl_reg_TN[0][14]();
|
|
5837 |
gen_movl_reg_T0(s, 14);
|
|
5727 | 5838 |
} |
5728 | 5839 |
offset = (((int32_t)insn << 8) >> 8); |
5729 | 5840 |
val += (offset << 2) + 4; |
... | ... | |
5740 | 5851 |
case 0xf: |
5741 | 5852 |
/* swi */ |
5742 | 5853 |
gen_op_movl_T0_im((long)s->pc); |
5743 |
gen_op_movl_reg_TN[0][15]();
|
|
5854 |
gen_set_pc_T0();
|
|
5744 | 5855 |
s->is_jmp = DISAS_SWI; |
5745 | 5856 |
break; |
5746 | 5857 |
default: |
5747 | 5858 |
illegal_op: |
5748 | 5859 |
gen_set_condexec(s); |
5749 | 5860 |
gen_op_movl_T0_im((long)s->pc - 4); |
5750 |
gen_op_movl_reg_TN[0][15]();
|
|
5861 |
gen_set_pc_T0();
|
|
5751 | 5862 |
gen_op_undef_insn(); |
5752 | 5863 |
s->is_jmp = DISAS_JUMP; |
5753 | 5864 |
break; |
... | ... | |
5806 | 5917 |
if (conds) |
5807 | 5918 |
gen_op_adcl_T0_T1_cc(); |
5808 | 5919 |
else |
5809 |
gen_op_adcl_T0_T1();
|
|
5920 |
gen_adc_T0_T1();
|
|
5810 | 5921 |
break; |
5811 | 5922 |
case 11: /* sbc */ |
5812 | 5923 |
if (conds) |
... | ... | |
5832 | 5943 |
if (logic_cc) { |
5833 | 5944 |
gen_op_logic_T0_cc(); |
5834 | 5945 |
if (shifter_out) |
5835 |
gen_op_mov_CF_T1();
|
|
5946 |
gen_set_CF_bit31(cpu_T[1]);
|
|
5836 | 5947 |
} |
5837 | 5948 |
return 0; |
5838 | 5949 |
} |
... | ... | |
5843 | 5954 |
{ |
5844 | 5955 |
uint32_t insn, imm, shift, offset, addr; |
5845 | 5956 |
uint32_t rd, rn, rm, rs; |
5957 |
TCGv tmp; |
|
5846 | 5958 |
int op; |
5847 | 5959 |
int shiftop; |
5848 | 5960 |
int conds; |
... | ... | |
5966 | 6078 |
} else { |
5967 | 6079 |
gen_movl_T1_reg(s, rn); |
5968 | 6080 |
} |
5969 |
gen_movl_T2_reg(s, rm);
|
|
5970 |
gen_op_addl_T1_T2();
|
|
6081 |
tmp = load_reg(s, rm);
|
|
6082 |
tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp);
|
|
5971 | 6083 |
if (insn & (1 << 4)) { |
5972 | 6084 |
/* tbh */ |
5973 |
gen_op_addl_T1_T2(); |
|
6085 |
tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp); |
|
6086 |
dead_tmp(tmp); |
|
5974 | 6087 |
gen_ldst(lduw, s); |
5975 | 6088 |
} else { /* tbb */ |
6089 |
dead_tmp(tmp); |
|
5976 | 6090 |
gen_ldst(ldub, s); |
5977 | 6091 |
} |
5978 | 6092 |
gen_op_jmp_T0_im(s->pc); |
... | ... | |
6126 | 6240 |
shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c); |
6127 | 6241 |
conds = (insn & (1 << 20)) != 0; |
6128 | 6242 |
logic_cc = (conds && thumb2_logic_op(op)); |
6129 |
if (shift != 0) {
|
|
6130 |
if (logic_cc) {
|
|
6243 |
if (logic_cc) {
|
|
6244 |
if (shift != 0) {
|
|
6131 | 6245 |
gen_shift_T1_im_cc[shiftop](shift); |
6132 |
} else { |
|
6133 |
gen_shift_T1_im[shiftop](shift); |
|
6134 |
} |
|
6135 |
} else if (shiftop != 0) { |
|
6136 |
if (logic_cc) { |
|
6246 |
} else if (shiftop != 0) { |
|
6137 | 6247 |
gen_shift_T1_0_cc[shiftop](); |
6138 |
} else { |
|
6139 |
gen_shift_T1_0[shiftop](); |
|
6140 | 6248 |
} |
6249 |
} else { |
|
6250 |
gen_arm_shift_im(cpu_T[1], shiftop, shift); |
|
6141 | 6251 |
} |
6142 | 6252 |
if (gen_thumb2_data_op(s, op, conds, 0)) |
6143 | 6253 |
goto illegal_op; |
... | ... | |
6172 | 6282 |
gen_op_rorl_T1_im(shift * 8); |
6173 | 6283 |
op = (insn >> 20) & 7; |
6174 | 6284 |
switch (op) { |
6175 |
case 0: gen_op_sxth_T1(); break;
|
|
6176 |
case 1: gen_op_uxth_T1(); break;
|
|
6177 |
case 2: gen_op_sxtb16_T1(); break;
|
|
6178 |
case 3: gen_op_uxtb16_T1(); break;
|
|
6179 |
case 4: gen_op_sxtb_T1(); break;
|
|
6180 |
case 5: gen_op_uxtb_T1(); break;
|
|
6285 |
case 0: gen_sxth(cpu_T[1]); break;
|
|
6286 |
case 1: gen_uxth(cpu_T[1]); break;
|
|
6287 |
case 2: gen_sxtb16(cpu_T[1]); break;
|
|
6288 |
case 3: gen_uxtb16(cpu_T[1]); break;
|
|
6289 |
case 4: gen_sxtb(cpu_T[1]); break;
|
|
6290 |
case 5: gen_uxtb(cpu_T[1]); break;
|
|
6181 | 6291 |
default: goto illegal_op; |
6182 | 6292 |
} |
6183 | 6293 |
if (rn != 15) { |
6184 |
gen_movl_T2_reg(s, rn);
|
|
6294 |
tmp = load_reg(s, rn);
|
|
6185 | 6295 |
if ((op >> 1) == 1) { |
6186 |
gen_op_add16_T1_T2();
|
|
6296 |
gen_add16(cpu_T[1], tmp);
|
|
6187 | 6297 |
} else { |
6188 |
gen_op_addl_T1_T2(); |
|
6298 |
tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp); |
|
6299 |
dead_tmp(tmp); |
|
6189 | 6300 |
} |
6190 | 6301 |
} |
6191 | 6302 |
gen_movl_reg_T1(s, rd); |
... | ... | |
6286 | 6397 |
if (op) |
6287 | 6398 |
gen_op_sarl_T1_im(16); |
6288 | 6399 |
else |
6289 |
gen_op_sxth_T1();
|
|
6400 |
gen_sxth(cpu_T[1]);
|
|
6290 | 6401 |
gen_op_imulw_T0_T1(); |
6291 | 6402 |
if (rs != 15) |
6292 | 6403 |
{ |
... | ... | |
6718 | 6829 |
shift = (insn >> 4) & 0xf; |
6719 | 6830 |
if (shift > 3) |
6720 | 6831 |
goto illegal_op; |
6721 |
gen_movl_T2_reg(s, rm);
|
|
6832 |
tmp = load_reg(s, rm);
|
|
6722 | 6833 |
if (shift) |
6723 |
gen_op_shll_T2_im(shift); |
|
6724 |
gen_op_addl_T1_T2(); |
|
6834 |
tcg_gen_shli_i32(tmp, tmp, shift); |
|
6835 |
tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp); |
|
6836 |
dead_tmp(tmp); |
|
6725 | 6837 |
break; |
6726 | 6838 |
case 4: /* Negative offset. */ |
6727 | 6839 |
gen_op_addl_T1_im(-imm); |
... | ... | |
6733 | 6845 |
imm = -imm; |
6734 | 6846 |
/* Fall through. */ |
6735 | 6847 |
case 3: /* Post-increment. */ |
6736 |
gen_op_movl_T2_im(imm); |
|
6737 | 6848 |
postinc = 1; |
6738 | 6849 |
writeback = 1; |
6739 | 6850 |
break; |
... | ... | |
6802 | 6913 |
uint32_t val, insn, op, rm, rn, rd, shift, cond; |
6803 | 6914 |
int32_t offset; |
6804 | 6915 |
int i; |
6916 |
TCGv tmp; |
|
6805 | 6917 |
|
6806 | 6918 |
if (s->condexec_mask) { |
6807 | 6919 |
cond = s->condexec_cond; |
... | ... | |
6989 | 7101 |
break; |
6990 | 7102 |
case 0x5: /* adc */ |
6991 | 7103 |
if (s->condexec_mask) |
6992 |
gen_op_adcl_T0_T1();
|
|
7104 |
gen_adc_T0_T1();
|
|
6993 | 7105 |
else |
6994 | 7106 |
gen_op_adcl_T0_T1_cc(); |
6995 | 7107 |
break; |
... | ... | |
7064 | 7176 |
rm = (insn >> 6) & 7; |
7065 | 7177 |
op = (insn >> 9) & 7; |
7066 | 7178 |
gen_movl_T1_reg(s, rn); |
7067 |
gen_movl_T2_reg(s, rm); |
|
7068 |
gen_op_addl_T1_T2(); |
|
7179 |
tmp = load_reg(s, rm); |
|
7180 |
tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp); |
|
7181 |
dead_tmp(tmp); |
|
7069 | 7182 |
|
7070 | 7183 |
if (op < 3) /* store */ |
7071 | 7184 |
gen_movl_T0_reg(s, rd); |
... | ... | |
7106 | 7219 |
rn = (insn >> 3) & 7; |
7107 | 7220 |
gen_movl_T1_reg(s, rn); |
7108 | 7221 |
val = (insn >> 4) & 0x7c; |
7109 |
gen_op_movl_T2_im(val); |
|
7110 |
gen_op_addl_T1_T2(); |
|
7222 |
tcg_gen_addi_i32(cpu_T[1], cpu_T[1], val); |
|
7111 | 7223 |
|
7112 | 7224 |
if (insn & (1 << 11)) { |
7113 | 7225 |
/* load */ |
... | ... | |
7126 | 7238 |
rn = (insn >> 3) & 7; |
7127 | 7239 |
gen_movl_T1_reg(s, rn); |
7128 | 7240 |
val = (insn >> 6) & 0x1f; |
7129 |
gen_op_movl_T2_im(val); |
|
7130 |
gen_op_addl_T1_T2(); |
|
7241 |
tcg_gen_addi_i32(cpu_T[1], cpu_T[1], val); |
|
7131 | 7242 |
|
7132 | 7243 |
if (insn & (1 << 11)) { |
7133 | 7244 |
/* load */ |
... | ... | |
7146 | 7257 |
rn = (insn >> 3) & 7; |
7147 | 7258 |
gen_movl_T1_reg(s, rn); |
7148 | 7259 |
val = (insn >> 5) & 0x3e; |
7149 |
gen_op_movl_T2_im(val); |
|
7150 |
gen_op_addl_T1_T2(); |
|
7260 |
tcg_gen_addi_i32(cpu_T[1], cpu_T[1], val); |
|
7151 | 7261 |
|
7152 | 7262 |
if (insn & (1 << 11)) { |
7153 | 7263 |
/* load */ |
... | ... | |
7165 | 7275 |
rd = (insn >> 8) & 7; |
7166 | 7276 |
gen_movl_T1_reg(s, 13); |
7167 | 7277 |
val = (insn & 0xff) * 4; |
7168 |
gen_op_movl_T2_im(val); |
|
7169 |
gen_op_addl_T1_T2(); |
|
7278 |
tcg_gen_addi_i32(cpu_T[1], cpu_T[1], val); |
|
7170 | 7279 |
|
7171 | 7280 |
if (insn & (1 << 11)) { |
7172 | 7281 |
/* load */ |
... | ... | |
7201 | 7310 |
switch (op) { |
7202 | 7311 |
case 0: |
7203 | 7312 |
/* adjust stack pointer */ |
7204 |
gen_movl_T1_reg(s, 13);
|
|
7313 |
tmp = load_reg(s, 13);
|
|
7205 | 7314 |
val = (insn & 0x7f) * 4; |
7206 | 7315 |
if (insn & (1 << 7)) |
7207 | 7316 |
val = -(int32_t)val; |
7208 |
gen_op_movl_T2_im(val); |
|
7209 |
gen_op_addl_T1_T2(); |
|
7210 |
gen_movl_reg_T1(s, 13); |
|
7317 |
tcg_gen_addi_i32(tmp, tmp, val); |
|
7318 |
store_reg(s, 13, tmp); |
|
7211 | 7319 |
break; |
7212 | 7320 |
|
7213 | 7321 |
case 2: /* sign/zero extend. */ |
... | ... | |
7216 | 7324 |
rm = (insn >> 3) & 7; |
7217 | 7325 |
gen_movl_T1_reg(s, rm); |
7218 | 7326 |
switch ((insn >> 6) & 3) { |
7219 |
case 0: gen_op_sxth_T1(); break;
|
|
7220 |
case 1: gen_op_sxtb_T1(); break;
|
|
7221 |
case 2: gen_op_uxth_T1(); break;
|
|
7222 |
case 3: gen_op_uxtb_T1(); break;
|
|
7327 |
case 0: gen_sxth(cpu_T[1]); break;
|
|
7328 |
case 1: gen_sxtb(cpu_T[1]); break;
|
|
7329 |
case 2: gen_uxth(cpu_T[1]); break;
|
|
7330 |
case 3: gen_uxtb(cpu_T[1]); break;
|
|
7223 | 7331 |
} |
7224 | 7332 |
gen_movl_reg_T1(s, rd); |
7225 | 7333 |
break; |
... | ... | |
7235 | 7343 |
offset += 4; |
7236 | 7344 |
} |
7237 | 7345 |
if ((insn & (1 << 11)) == 0) { |
7238 |
gen_op_movl_T2_im(-offset); |
|
7239 |
gen_op_addl_T1_T2(); |
|
7346 |
gen_op_addl_T1_im(-offset); |
|
7240 | 7347 |
} |
7241 |
gen_op_movl_T2_im(4); |
|
7242 | 7348 |
for (i = 0; i < 8; i++) { |
7243 | 7349 |
if (insn & (1 << i)) { |
7244 | 7350 |
if (insn & (1 << 11)) { |
... | ... | |
7251 | 7357 |
gen_ldst(stl, s); |
7252 | 7358 |
} |
7253 | 7359 |
/* advance to the next address. */ |
7254 |
gen_op_addl_T1_T2();
|
|
7360 |
gen_op_addl_T1_im(4);
|
|
7255 | 7361 |
} |
7256 | 7362 |
} |
7257 | 7363 |
if (insn & (1 << 8)) { |
... | ... | |
7265 | 7371 |
gen_movl_T0_reg(s, 14); |
7266 | 7372 |
gen_ldst(stl, s); |
7267 | 7373 |
} |
7268 |
gen_op_addl_T1_T2();
|
|
7374 |
gen_op_addl_T1_im(4);
|
|
7269 | 7375 |
} |
7270 | 7376 |
if ((insn & (1 << 11)) == 0) { |
7271 |
gen_op_movl_T2_im(-offset); |
|
7272 |
gen_op_addl_T1_T2(); |
|
7377 |
gen_op_addl_T1_im(-offset); |
|
7273 | 7378 |
} |
7274 | 7379 |
/* write back the new stack pointer */ |
7275 | 7380 |
gen_movl_reg_T1(s, 13); |
... | ... | |
7308 | 7413 |
case 0xe: /* bkpt */ |
7309 | 7414 |
gen_set_condexec(s); |
7310 | 7415 |
gen_op_movl_T0_im((long)s->pc - 2); |
7311 |
gen_op_movl_reg_TN[0][15]();
|
|
7416 |
gen_set_pc_T0();
|
|
7312 | 7417 |
gen_op_bkpt(); |
7313 | 7418 |
s->is_jmp = DISAS_JUMP; |
7314 | 7419 |
break; |
... | ... | |
7363 | 7468 |
/* load/store multiple */ |
7364 | 7469 |
rn = (insn >> 8) & 0x7; |
7365 | 7470 |
gen_movl_T1_reg(s, rn); |
7366 |
gen_op_movl_T2_im(4); |
|
7367 | 7471 |
for (i = 0; i < 8; i++) { |
7368 | 7472 |
if (insn & (1 << i)) { |
7369 | 7473 |
if (insn & (1 << 11)) { |
... | ... | |
7376 | 7480 |
gen_ldst(stl, s); |
7377 | 7481 |
} |
7378 | 7482 |
/* advance to the next address */ |
7379 |
gen_op_addl_T1_T2();
|
|
7483 |
gen_op_addl_T1_im(4);
|
|
7380 | 7484 |
} |
7381 | 7485 |
} |
7382 | 7486 |
/* Base register writeback. */ |
... | ... | |
7395 | 7499 |
gen_set_condexec(s); |
7396 | 7500 |
gen_op_movl_T0_im((long)s->pc | 1); |
7397 | 7501 |
/* Don't set r15. */ |
7398 |
gen_op_movl_reg_TN[0][15]();
|
|
7502 |
gen_set_pc_T0();
|
|
7399 | 7503 |
s->is_jmp = DISAS_SWI; |
7400 | 7504 |
break; |
7401 | 7505 |
} |
... | ... | |
7434 | 7538 |
undef32: |
7435 | 7539 |
gen_set_condexec(s); |
7436 | 7540 |
gen_op_movl_T0_im((long)s->pc - 4); |
7437 |
gen_op_movl_reg_TN[0][15]();
|
|
7541 |
gen_set_pc_T0();
|
|
7438 | 7542 |
gen_op_undef_insn(); |
7439 | 7543 |
s->is_jmp = DISAS_JUMP; |
7440 | 7544 |
return; |
... | ... | |
7442 | 7546 |
undef: |
7443 | 7547 |
gen_set_condexec(s); |
7444 | 7548 |
gen_op_movl_T0_im((long)s->pc - 2); |
7445 |
gen_op_movl_reg_TN[0][15]();
|
|
7549 |
gen_set_pc_T0();
|
|
7446 | 7550 |
gen_op_undef_insn(); |
7447 | 7551 |
s->is_jmp = DISAS_JUMP; |
7448 | 7552 |
} |
... | ... | |
7461 | 7565 |
uint32_t next_page_start; |
7462 | 7566 |
|
7463 | 7567 |
/* generate intermediate code */ |
7568 |
num_temps = 0; |
|
7569 |
memset(temps, 0, sizeof(temps)); |
|
7570 |
|
|
7464 | 7571 |
pc_start = tb->pc; |
7465 | 7572 |
|
7466 | 7573 |
dc->tb = tb; |
... | ... | |
7502 | 7609 |
if (env->breakpoints[j] == dc->pc) { |
7503 | 7610 |
gen_set_condexec(dc); |
7504 | 7611 |
gen_op_movl_T0_im((long)dc->pc); |
7505 |
gen_op_movl_reg_TN[0][15]();
|
|
7612 |
gen_set_pc_T0();
|
|
7506 | 7613 |
gen_op_debug(); |
7507 | 7614 |
dc->is_jmp = DISAS_JUMP; |
7508 | 7615 |
/* Advance PC so that clearing the breakpoint will |
... | ... | |
7537 | 7644 |
} else { |
7538 | 7645 |
disas_arm_insn(env, dc); |
7539 | 7646 |
} |
7647 |
if (num_temps) { |
|
7648 |
fprintf(stderr, "Internal resource leak before %08x\n", dc->pc); |
|
7649 |
num_temps = 0; |
|
7650 |
} |
|
7540 | 7651 |
|
7541 | 7652 |
if (dc->condjmp && !dc->is_jmp) { |
7542 | 7653 |
gen_set_label(dc->condlabel); |
... | ... | |
7572 | 7683 |
} |
7573 | 7684 |
if (dc->condjmp || !dc->is_jmp) { |
7574 | 7685 |
gen_op_movl_T0_im((long)dc->pc); |
7575 |
gen_op_movl_reg_TN[0][15]();
|
|
7686 |
gen_set_pc_T0();
|
|
7576 | 7687 |
dc->condjmp = 0; |
7577 | 7688 |
} |
7578 | 7689 |
gen_set_condexec(dc); |
Also available in: Unified diff