Revision 8984bd2e target-arm/translate.c
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -201,6 +201,13 @@
 #define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
 #define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])
 
+#define gen_op_addl_T0_T1_cc() gen_helper_add_cc(cpu_T[0], cpu_T[0], cpu_T[1])
+#define gen_op_adcl_T0_T1_cc() gen_helper_adc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
+#define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
+#define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
+#define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])
+#define gen_op_rscl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[1], cpu_T[0])
+
 #define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
 #define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
 #define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
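The six gen_op_*_cc macros added above route flag-setting arithmetic through TCG runtime helpers instead of dedicated dyngen ops. A helper of this kind computes the result and records the flags in the CPU state (bit 31 of NF is the N flag, Z is set when ZF == 0). The following is only an illustrative sketch of such a helper body, assuming the legacy global `env` pointer used by ARM helpers of this period; the real definitions live in the helper sources, not in translate.c:

    /* Sketch only: add with flag update, not the verbatim QEMU helper. */
    uint32_t HELPER(add_cc)(uint32_t a, uint32_t b)
    {
        uint32_t result = a + b;
        env->NF = env->ZF = result;            /* N = bit 31, Z = (result == 0) */
        env->CF = result < a;                  /* unsigned carry out */
        env->VF = (a ^ b ^ -1) & (a ^ result); /* bit 31 = signed overflow */
        return result;
    }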
@@ -538,6 +545,27 @@
     }
 };
 
+static inline void gen_arm_shift_reg(TCGv var, int shiftop,
+                                     TCGv shift, int flags)
+{
+    if (flags) {
+        switch (shiftop) {
+        case 0: gen_helper_shl_cc(var, var, shift); break;
+        case 1: gen_helper_shr_cc(var, var, shift); break;
+        case 2: gen_helper_sar_cc(var, var, shift); break;
+        case 3: gen_helper_ror_cc(var, var, shift); break;
+        }
+    } else {
+        switch (shiftop) {
+        case 0: gen_helper_shl(var, var, shift); break;
+        case 1: gen_helper_shr(var, var, shift); break;
+        case 2: gen_helper_sar(var, var, shift); break;
+        case 3: gen_helper_ror(var, var, shift); break;
+        }
+    }
+    dead_tmp(shift);
+}
+
 #define PAS_OP(pfx) \
     switch (op2) { \
     case 0: gen_pas_helper(glue(pfx,add16)); break; \
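gen_arm_shift_reg() above becomes the single entry point for register-specified shifts; the flags argument selects the carry-updating _cc helpers. These helpers only maintain the carry flag; N and Z are still produced by a separate gen_op_logic_T1_cc()/gen_logic_CC() call at the use sites, as the later hunks show. A rough sketch of one carry-updating helper, under the same global-env assumption as before:

    /* Sketch only: LSL by register with carry-out, not the verbatim helper. */
    uint32_t HELPER(shl_cc)(uint32_t x, uint32_t i)
    {
        int shift = i & 0xff;        /* ARM shifts use only the low byte of Rs */
        if (shift >= 32) {
            env->CF = (shift == 32) ? (x & 1) : 0;
            x = 0;
        } else if (shift != 0) {
            env->CF = (x >> (32 - shift)) & 1;
            x <<= shift;
        }
        return x;                    /* N and Z are handled by the caller */
    }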
@@ -746,20 +774,6 @@
     1, /* mvn */
 };
 
-static GenOpFunc *gen_shift_T1_T0[4] = {
-    gen_op_shll_T1_T0,
-    gen_op_shrl_T1_T0,
-    gen_op_sarl_T1_T0,
-    gen_op_rorl_T1_T0,
-};
-
-static GenOpFunc *gen_shift_T1_T0_cc[4] = {
-    gen_op_shll_T1_T0_cc,
-    gen_op_shrl_T1_T0_cc,
-    gen_op_sarl_T1_T0_cc,
-    gen_op_rorl_T1_T0_cc,
-};
-
 /* Set PC and Thumb state from an immediate address. */
 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
 {
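With gen_arm_shift_reg() available, the gen_shift_T1_T0 and gen_shift_T1_T0_cc function-pointer tables removed above have no remaining users. The data-processing call site later in this revision shows the shape of the conversion:

    /* Old: value staged through cpu_T[0], dispatched through a GenOpFunc table. */
    gen_movl_T0_reg(s, rs);
    if (logic_cc) {
        gen_shift_T1_T0_cc[shiftop]();
    } else {
        gen_shift_T1_T0[shiftop]();
    }

    /* New: explicit TCG temporary plus the shared gen_arm_shift_reg(). */
    tmp = load_reg(s, rs);
    gen_arm_shift_reg(cpu_T[1], shiftop, tmp, logic_cc);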
@@ -2249,6 +2263,7 @@
    instruction is not defined.  */
 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
 {
+    TCGv tmp;
     uint32_t rd = (insn >> 12) & 0xf;
     uint32_t cp = (insn >> 8) & 0xf;
     if (IS_USER(s)) {
@@ -2258,17 +2273,16 @@
     if (insn & ARM_CP_RW_BIT) {
         if (!env->cp[cp].cp_read)
             return 1;
-        gen_op_movl_T0_im((uint32_t) s->pc);
-        gen_set_pc_T0();
-        gen_op_movl_T0_cp(insn);
-        gen_movl_reg_T0(s, rd);
+        gen_set_pc_im(s->pc);
+        tmp = new_tmp();
+        gen_helper_get_cp(tmp, cpu_env, tcg_const_i32(insn));
+        store_reg(s, rd, tmp);
     } else {
         if (!env->cp[cp].cp_write)
             return 1;
-        gen_op_movl_T0_im((uint32_t) s->pc);
-        gen_set_pc_T0();
-        gen_movl_T0_reg(s, rd);
-        gen_op_movl_cp_T0(insn);
+        gen_set_pc_im(s->pc);
+        tmp = load_reg(s, rd);
+        gen_helper_set_cp(cpu_env, tcg_const_i32(insn), tmp);
     }
     return 0;
 }
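The coprocessor access path above now synchronizes the guest PC with gen_set_pc_im() before calling into the helper, so that an exception raised inside cp_read/cp_write sees the correct address, and it moves the data through an explicit temporary instead of cpu_T[0]. The sketch below shows the presumable shape of gen_set_pc_im(); its actual definition sits elsewhere in translate.c, so treat the body as an assumption:

    /* Assumed shape: write an immediate PC value into the CPU state. */
    static inline void gen_set_pc_im(uint32_t val)
    {
        TCGv tmp = new_tmp();           /* scratch TCG temporary */
        tcg_gen_movi_i32(tmp, val);     /* materialize the immediate */
        store_cpu_field(tmp, regs[15]); /* env->regs[15] = val */
    }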
@@ -2298,6 +2312,7 @@
 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
 {
     uint32_t rd;
+    TCGv tmp;
 
     /* M profile cores use memory mapped registers instead of cp15.  */
     if (arm_feature(env, ARM_FEATURE_M))
@@ -2321,20 +2336,23 @@
     if ((insn & 0x0fff0fff) == 0x0e070f90
         || (insn & 0x0fff0fff) == 0x0e070f58) {
         /* Wait for interrupt.  */
-        gen_op_movl_T0_im((long)s->pc);
-        gen_set_pc_T0();
+        gen_set_pc_im(s->pc);
         s->is_jmp = DISAS_WFI;
         return 0;
     }
     rd = (insn >> 12) & 0xf;
     if (insn & ARM_CP_RW_BIT) {
-        gen_op_movl_T0_cp15(insn);
+        tmp = new_tmp();
+        gen_helper_get_cp15(tmp, cpu_env, tcg_const_i32(insn));
         /* If the destination register is r15 then sets condition codes.  */
         if (rd != 15)
-            gen_movl_reg_T0(s, rd);
+            store_reg(s, rd, tmp);
+        else
+            dead_tmp(tmp);
     } else {
-        gen_movl_T0_reg(s, rd);
-        gen_op_movl_cp15_T0(insn);
+        tmp = load_reg(s, rd);
+        gen_helper_set_cp15(cpu_env, tcg_const_i32(insn), tmp);
+        dead_tmp(tmp);
         /* Normally we would always end the TB here, but Linux
          * arch/arm/mach-pxa/sleep.S expects two instructions following
          * an MMU enable to execute from cache.  Imitate this behaviour. */
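One detail in the cp15 read path above: the temporary returned by new_tmp() must be released exactly once, either by handing it to store_reg() or by an explicit dead_tmp(). That is why the rd == 15 case, which does not write a general register, now ends in dead_tmp(tmp). In miniature:

    tmp = new_tmp();                           /* allocate a temporary */
    gen_helper_get_cp15(tmp, cpu_env, tcg_const_i32(insn));
    if (rd != 15)
        store_reg(s, rd, tmp);                 /* store_reg consumes tmp */
    else
        dead_tmp(tmp);                         /* otherwise free it here */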
@@ -3052,12 +3070,10 @@
     tb = s->tb;
     if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
         tcg_gen_goto_tb(n);
-        gen_op_movl_T0_im(dest);
-        gen_set_pc_T0();
+        gen_set_pc_im(dest);
         tcg_gen_exit_tb((long)tb + n);
     } else {
-        gen_op_movl_T0_im(dest);
-        gen_set_pc_T0();
+        gen_set_pc_im(dest);
         tcg_gen_exit_tb(0);
     }
 }
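Both arms of gen_goto_tb() above now reduce to a single gen_set_pc_im(dest); the only difference left is whether the translation block can be chained. Restated with the intent spelled out in comments:

    /* Same-page target: emit a patchable jump so this TB can be chained. */
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);             /* placeholder, patched when chained */
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((long)tb + n);  /* identifies (tb, slot) for chaining */
    } else {
        /* Cross-page target: always return to the main execution loop. */
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }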
@@ -3173,8 +3189,7 @@
 {
     switch (val) {
     case 3: /* wfi */
-        gen_op_movl_T0_im((long)s->pc);
-        gen_set_pc_T0();
+        gen_set_pc_im(s->pc);
         s->is_jmp = DISAS_WFI;
         break;
     case 2: /* wfe */
@@ -5770,12 +5785,8 @@
                 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
             } else {
                 rs = (insn >> 8) & 0xf;
-                gen_movl_T0_reg(s, rs);
-                if (logic_cc) {
-                    gen_shift_T1_T0_cc[shiftop]();
-                } else {
-                    gen_shift_T1_T0[shiftop]();
-                }
+                tmp = load_reg(s, rs);
+                gen_arm_shift_reg(cpu_T[1], shiftop, tmp, logic_cc);
             }
         }
         if (op1 != 0x0f && op1 != 0x0d) {
@@ -5977,14 +5988,20 @@
                 /* SWP instruction */
                 rm = (insn) & 0xf;
 
-                gen_movl_T0_reg(s, rm);
-                gen_movl_T1_reg(s, rn);
+                /* ??? This is not really atomic.  However we know
+                   we never have multiple CPUs running in parallel,
+                   so it is good enough.  */
+                addr = load_reg(s, rn);
+                tmp = load_reg(s, rm);
                 if (insn & (1 << 22)) {
-                    gen_ldst(swpb, s);
+                    tmp2 = gen_ld8u(addr, IS_USER(s));
+                    gen_st8(tmp, addr, IS_USER(s));
                 } else {
-                    gen_ldst(swpl, s);
+                    tmp2 = gen_ld32(addr, IS_USER(s));
+                    gen_st32(tmp, addr, IS_USER(s));
                 }
-                gen_movl_reg_T0(s, rd);
+                dead_tmp(addr);
+                store_reg(s, rd, tmp2);
             }
         }
     } else {
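The SWP/SWPB conversion above makes the load/store pair explicit through TCG temporaries. In plain C terms the generated sequence implements the following (illustrative only; the swp_word name is invented for the example, and it ignores endianness, alignment and the user-mode access flag):

    /* What the emitted code does for the word-sized SWP case. */
    uint32_t swp_word(uint32_t *mem_at_rn, uint32_t rm_val)
    {
        uint32_t old = *mem_at_rn;   /* tmp2 = gen_ld32(addr, IS_USER(s)); */
        *mem_at_rn = rm_val;         /* gen_st32(tmp, addr, IS_USER(s));   */
        return old;                  /* store_reg(s, rd, tmp2);            */
    }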
@@ -6903,18 +6920,16 @@
             goto illegal_op;
         switch (op) {
         case 0: /* Register controlled shift.  */
-            gen_movl_T0_reg(s, rm);
-            gen_movl_T1_reg(s, rn);
+            tmp = load_reg(s, rn);
+            tmp2 = load_reg(s, rm);
             if ((insn & 0x70) != 0)
                 goto illegal_op;
             op = (insn >> 21) & 3;
-            if (insn & (1 << 20)) {
-                gen_shift_T1_T0_cc[op]();
-                gen_op_logic_T1_cc();
-            } else {
-                gen_shift_T1_T0[op]();
-            }
-            gen_movl_reg_T1(s, rd);
+            logic_cc = (insn & (1 << 20)) != 0;
+            gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
+            if (logic_cc)
+                gen_logic_CC(tmp);
+            store_reg(s, rd, tmp);
             break;
         case 1: /* Sign/zero extend.  */
             tmp = load_reg(s, rm);
@@ -7208,8 +7223,9 @@
             switch (op) {
             case 0: /* msr cpsr.  */
                 if (IS_M(env)) {
-                    gen_op_v7m_msr_T0(insn & 0xff);
-                    gen_movl_reg_T0(s, rn);
+                    tmp = load_reg(s, rn);
+                    addr = tcg_const_i32(insn & 0xff);
+                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                     gen_lookup_tb(s);
                     break;
                 }
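In the M-profile msr path above, the special-register number from the instruction encoding is now passed to the helper as a TCG constant instead of being baked into a dedicated op; the same pattern recurs in the cps handling at the end of this revision with the constants 16 (PRIMASK) and 17 (FAULTMASK). The emitted sequence is simply:

    tmp = load_reg(s, rn);                  /* value to write */
    addr = tcg_const_i32(insn & 0xff);      /* register selector from the insn */
    gen_helper_v7m_msr(cpu_env, addr, tmp); /* helper performs the update */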
@@ -7276,12 +7292,14 @@
                 /* Unpredictable in user mode.  */
                 goto illegal_op;
             case 6: /* mrs cpsr.  */
+                tmp = new_tmp();
                 if (IS_M(env)) {
-                    gen_op_v7m_mrs_T0(insn & 0xff);
+                    addr = tcg_const_i32(insn & 0xff);
+                    gen_helper_v7m_mrs(tmp, cpu_env, addr);
                 } else {
-                    gen_helper_cpsr_read(cpu_T[0]);
+                    gen_helper_cpsr_read(tmp);
                 }
-                gen_movl_reg_T0(s, rd);
+                store_reg(s, rd, tmp);
                 break;
             case 7: /* mrs spsr.  */
                 /* Not accessible in user mode.  */
@@ -7753,25 +7771,25 @@
             break;
         case 0x2: /* lsl */
             if (s->condexec_mask) {
-                gen_op_shll_T1_T0();
+                gen_helper_shl(cpu_T[1], cpu_T[1], cpu_T[0]);
             } else {
-                gen_op_shll_T1_T0_cc();
+                gen_helper_shl_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
                 gen_op_logic_T1_cc();
             }
             break;
         case 0x3: /* lsr */
             if (s->condexec_mask) {
-                gen_op_shrl_T1_T0();
+                gen_helper_shr(cpu_T[1], cpu_T[1], cpu_T[0]);
             } else {
-                gen_op_shrl_T1_T0_cc();
+                gen_helper_shr_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
                 gen_op_logic_T1_cc();
             }
             break;
         case 0x4: /* asr */
             if (s->condexec_mask) {
-                gen_op_sarl_T1_T0();
+                gen_helper_sar(cpu_T[1], cpu_T[1], cpu_T[0]);
             } else {
-                gen_op_sarl_T1_T0_cc();
+                gen_helper_sar_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
                 gen_op_logic_T1_cc();
             }
             break;
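In the Thumb shift cases above, a non-zero s->condexec_mask means the instruction is executing inside an IT block and therefore must not alter the flags; otherwise the _cc helper supplies the shifter carry and gen_op_logic_T1_cc() derives N and Z from the result. The lsl case, annotated:

    if (s->condexec_mask) {
        /* Inside an IT block: shift only, flags untouched. */
        gen_helper_shl(cpu_T[1], cpu_T[1], cpu_T[0]);
    } else {
        /* Flag-setting form: helper updates C, logic_T1_cc sets N and Z. */
        gen_helper_shl_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
        gen_op_logic_T1_cc();
    }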
@@ -7789,9 +7807,9 @@
             break;
         case 0x7: /* ror */
             if (s->condexec_mask) {
-                gen_op_rorl_T1_T0();
+                gen_helper_ror(cpu_T[1], cpu_T[1], cpu_T[0]);
             } else {
-                gen_op_rorl_T1_T0_cc();
+                gen_helper_ror_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
                 gen_op_logic_T1_cc();
             }
             break;
@@ -8118,15 +8136,17 @@
             if (IS_USER(s))
                 break;
             if (IS_M(env)) {
-                val = (insn & (1 << 4)) != 0;
-                gen_op_movl_T0_im(val);
+                tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                 /* PRIMASK */
-                if (insn & 1)
-                    gen_op_v7m_msr_T0(16);
+                if (insn & 1) {
+                    addr = tcg_const_i32(16);
+                    gen_helper_v7m_msr(cpu_env, addr, tmp);
+                }
                 /* FAULTMASK */
-                if (insn & 2)
-                    gen_op_v7m_msr_T0(17);
-
+                if (insn & 2) {
+                    addr = tcg_const_i32(17);
+                    gen_helper_v7m_msr(cpu_env, addr, tmp);
+                }
                 gen_lookup_tb(s);
             } else {
                 if (insn & (1 << 4))