Revision 8f8e3aa4 target-arm/translate.c
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -78,7 +78,7 @@
 
 static TCGv cpu_env;
 /* FIXME: These should be removed.  */
-static TCGv cpu_T[3];
+static TCGv cpu_T[2];
 static TCGv cpu_F0s, cpu_F1s, cpu_F0d, cpu_F1d;
 
 /* initialize TCG globals.  */
@@ -88,7 +88,6 @@
 
     cpu_T[0] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG1, "T0");
     cpu_T[1] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG2, "T1");
-    cpu_T[2] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG3, "T2");
 }
 
 /* The code generator doesn't like lots of temporaries, so maintain our own
@@ -188,13 +187,9 @@
 
 /* Basic operations.  */
 #define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
-#define gen_op_movl_T0_T2() tcg_gen_mov_i32(cpu_T[0], cpu_T[2])
 #define gen_op_movl_T1_T0() tcg_gen_mov_i32(cpu_T[1], cpu_T[0])
-#define gen_op_movl_T1_T2() tcg_gen_mov_i32(cpu_T[1], cpu_T[2])
-#define gen_op_movl_T2_T0() tcg_gen_mov_i32(cpu_T[2], cpu_T[0])
 #define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
 #define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)
-#define gen_op_movl_T2_im(im) tcg_gen_movi_i32(cpu_T[2], im)
 
 #define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
 #define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
@@ -310,9 +305,9 @@
 /* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
 static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
 {
-    tcg_gen_shli_i32(val, val, shift);
     tcg_gen_andi_i32(val, val, mask);
-    tcg_gen_andi_i32(base, base, ~mask);
+    tcg_gen_shli_i32(val, val, shift);
+    tcg_gen_andi_i32(base, base, ~(mask << shift));
     tcg_gen_or_i32(dest, base, val);
 }
 
@@ -460,6 +455,13 @@
 
 /* T0 &= ~T1.  Clobbers T1.  */
 /* FIXME: Implement bic natively.  */
+static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
+{
+    TCGv tmp = new_tmp();
+    tcg_gen_not_i32(tmp, t1);
+    tcg_gen_and_i32(dest, t0, tmp);
+    dead_tmp(tmp);
+}
 static inline void gen_op_bicl_T0_T1(void)
 {
     gen_op_notl_T1();
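The new tcg_gen_bic_i32 builds AND-NOT out of existing ops, hence the FIXME about a native implementation. A standalone C model of the sequence (bic_model is illustrative, not part of QEMU):

    #include <stdint.h>

    /* dest = t0 & ~t1; a scratch temp keeps t1 unclobbered. */
    static uint32_t bic_model(uint32_t t0, uint32_t t1)
    {
        uint32_t tmp = ~t1;   /* tcg_gen_not_i32(tmp, t1) */
        return t0 & tmp;      /* tcg_gen_and_i32(dest, t0, tmp) */
    }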
@@ -1167,6 +1169,19 @@
 #define NEON_GET_REG(T, reg, n) gen_op_neon_getreg_##T(neon_reg_offset(reg, n))
 #define NEON_SET_REG(T, reg, n) gen_op_neon_setreg_##T(neon_reg_offset(reg, n))
 
+static TCGv neon_load_reg(int reg, int pass)
+{
+    TCGv tmp = new_tmp();
+    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
+    return tmp;
+}
+
+static void neon_store_reg(int reg, int pass, TCGv var)
+{
+    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
+    dead_tmp(var);
+}
+
 #define tcg_gen_ld_f32 tcg_gen_ld_i32
 #define tcg_gen_ld_f64 tcg_gen_ld_i64
 #define tcg_gen_st_f32 tcg_gen_st_i32
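These two accessors are the core of the conversion: instead of routing NEON elements through the global T registers, each 32-bit chunk is loaded into a fresh TCG temporary and the temporary is freed again on store. A standalone C model of the pair (the flat array and offset scheme are assumed for illustration; QEMU's real offsets come from neon_reg_offset()):

    #include <stdint.h>

    static uint32_t neon_regs[32 * 2];   /* stand-in register file */

    static uint32_t load_chunk(int reg, int pass)
    {
        return neon_regs[reg * 2 + pass];
    }

    static void store_chunk(int reg, int pass, uint32_t var)
    {
        neon_regs[reg * 2 + pass] = var;
    }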
@@ -2500,19 +2515,14 @@
                 /* VMOV */
                 switch (size) {
                 case 0:
-                    NEON_GET_REG(T2, rn, pass);
-                    gen_op_movl_T1_im(0xff);
-                    gen_op_andl_T0_T1();
-                    gen_op_neon_insert_elt(offset, ~(0xff << offset));
-                    NEON_SET_REG(T2, rn, pass);
+                    tmp = neon_load_reg(rn, pass);
+                    gen_bfi(tmp, tmp, cpu_T[0], offset, 0xff);
+                    neon_store_reg(rn, pass, tmp);
                     break;
                 case 1:
-                    NEON_GET_REG(T2, rn, pass);
-                    gen_op_movl_T1_im(0xffff);
-                    gen_op_andl_T0_T1();
-                    bank_mask = offset ? 0xffff : 0xffff0000;
-                    gen_op_neon_insert_elt(offset, bank_mask);
-                    NEON_SET_REG(T2, rn, pass);
+                    tmp = neon_load_reg(rn, pass);
+                    gen_bfi(tmp, tmp, cpu_T[0], offset, 0xffff);
+                    neon_store_reg(rn, pass, tmp);
                     break;
                 case 2:
                     NEON_SET_REG(T0, rn, pass);
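With gen_bfi taking an unshifted mask, the ad-hoc insert_elt op and its bank_mask computation collapse into a single call. Plain-C model of the size == 0 case (helper name is hypothetical):

    #include <stdint.h>

    /* Insert the low byte of src into one byte lane of a 32-bit
       register chunk; offset is the lane's bit position. */
    static uint32_t vmov_insert_u8(uint32_t chunk, uint32_t src, int offset)
    {
        chunk &= ~(0xffu << offset);
        return chunk | ((src & 0xffu) << offset);
    }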
@@ -3480,9 +3490,9 @@
     int pass;
     int load;
     int shift;
-    uint32_t mask;
     int n;
     TCGv tmp;
+    TCGv tmp2;
 
     if (!vfp_enabled(env))
         return 1;
@@ -3525,60 +3535,47 @@
             } else if (size == 1) {
                 if (load) {
                     tmp = gen_ld16u(cpu_T[1], IS_USER(s));
-                    tcg_gen_mov_i32(cpu_T[0], tmp);
-                    dead_tmp(tmp);
                     gen_op_addl_T1_im(stride);
-                    gen_op_movl_T2_T0();
-                    tmp = gen_ld16u(cpu_T[1], IS_USER(s));
-                    tcg_gen_mov_i32(cpu_T[0], tmp);
-                    dead_tmp(tmp);
+                    tmp2 = gen_ld16u(cpu_T[1], IS_USER(s));
                     gen_op_addl_T1_im(stride);
-                    gen_op_neon_insert_elt(16, 0xffff);
-                    NEON_SET_REG(T2, rd, pass);
+                    gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
+                    dead_tmp(tmp2);
+                    neon_store_reg(rd, pass, tmp);
                 } else {
-                    NEON_GET_REG(T2, rd, pass);
-                    gen_op_movl_T0_T2();
-                    tmp = new_tmp();
-                    tcg_gen_mov_i32(tmp, cpu_T[0]);
+                    tmp = neon_load_reg(rd, pass);
+                    tmp2 = new_tmp();
+                    tcg_gen_shri_i32(tmp2, tmp, 16);
                     gen_st16(tmp, cpu_T[1], IS_USER(s));
                     gen_op_addl_T1_im(stride);
-                    gen_op_neon_extract_elt(16, 0xffff0000);
-                    tmp = new_tmp();
-                    tcg_gen_mov_i32(tmp, cpu_T[0]);
-                    gen_st16(tmp, cpu_T[1], IS_USER(s));
+                    gen_st16(tmp2, cpu_T[1], IS_USER(s));
                     gen_op_addl_T1_im(stride);
                 }
             } else /* size == 0 */ {
                 if (load) {
-                    mask = 0xff;
                     for (n = 0; n < 4; n++) {
                         tmp = gen_ld8u(cpu_T[1], IS_USER(s));
-                        tcg_gen_mov_i32(cpu_T[0], tmp);
-                        dead_tmp(tmp);
                         gen_op_addl_T1_im(stride);
                         if (n == 0) {
-                            gen_op_movl_T2_T0();
+                            tmp2 = tmp;
                         } else {
-                            gen_op_neon_insert_elt(n * 8, ~mask);
+                            gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
+                            dead_tmp(tmp);
                         }
-                        mask <<= 8;
                     }
-                    NEON_SET_REG(T2, rd, pass);
+                    neon_store_reg(rd, pass, tmp2);
                 } else {
-                    NEON_GET_REG(T2, rd, pass);
-                    mask = 0xff;
+                    tmp2 = neon_load_reg(rd, pass);
                     for (n = 0; n < 4; n++) {
+                        tmp = new_tmp();
                         if (n == 0) {
-                            gen_op_movl_T0_T2();
+                            tcg_gen_mov_i32(tmp, tmp2);
                         } else {
-                            gen_op_neon_extract_elt(n * 8, mask);
+                            tcg_gen_shri_i32(tmp, tmp2, n * 8);
                         }
-                        tmp = new_tmp();
-                        tcg_gen_mov_i32(tmp, cpu_T[0]);
                         gen_st8(tmp, cpu_T[1], IS_USER(s));
                         gen_op_addl_T1_im(stride);
-                        mask <<= 8;
                     }
+                    dead_tmp(tmp2);
                 }
             }
         }
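Where the old code shuffled bytes through T0/T2 with a per-iteration mask, the new code keeps the assembled chunk in a temporary and uses gen_bfi / tcg_gen_shri_i32 directly. A plain-C model of the 16-bit pair handling (function names are illustrative):

    #include <stdint.h>

    /* Load side: merge two halfwords into one 32-bit chunk,
       which is gen_bfi(tmp, tmp, tmp2, 16, 0xffff) above. */
    static uint32_t merge_halves(uint32_t lo, uint32_t hi)
    {
        return (lo & 0xffffu) | (hi << 16);
    }

    /* Store side: recover the high halfword with a shift. */
    static uint32_t high_half(uint32_t chunk)
    {
        return chunk >> 16;
    }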
@@ -3629,17 +3626,14 @@
         switch (size) {
         case 0:
             shift = ((insn >> 5) & 3) * 8;
-            mask = 0xff << shift;
             stride = 1;
             break;
         case 1:
             shift = ((insn >> 6) & 1) * 16;
-            mask = shift ? 0xffff0000 : 0xffff;
             stride = (insn & (1 << 5)) ? 2 : 1;
             break;
         case 2:
             shift = 0;
-            mask = 0xffffffff;
             stride = (insn & (1 << 6)) ? 2 : 1;
             break;
         default:
@@ -3649,9 +3643,6 @@
         gen_movl_T1_reg(s, rn);
         for (reg = 0; reg < nregs; reg++) {
             if (load) {
-                if (size != 2) {
-                    NEON_GET_REG(T2, rd, pass);
-                }
                 switch (size) {
                 case 0:
                     tmp = gen_ld8u(cpu_T[1], IS_USER(s));
@@ -3663,23 +3654,16 @@
                     tmp = gen_ld32(cpu_T[1], IS_USER(s));
                     break;
                 }
-                tcg_gen_mov_i32(cpu_T[0], tmp);
-                dead_tmp(tmp);
                 if (size != 2) {
-                    gen_op_neon_insert_elt(shift, ~mask);
-                    NEON_SET_REG(T0, rd, pass);
-                } else {
-                    NEON_SET_REG(T0, rd, pass);
+                    tmp2 = neon_load_reg(rd, pass);
+                    gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
+                    dead_tmp(tmp2);
                 }
+                neon_store_reg(rd, pass, tmp);
             } else { /* Store */
-                if (size == 2) {
-                    NEON_GET_REG(T0, rd, pass);
-                } else {
-                    NEON_GET_REG(T2, rd, pass);
-                    gen_op_neon_extract_elt(shift, mask);
-                }
-                tmp = new_tmp();
-                tcg_gen_mov_i32(tmp, cpu_T[0]);
+                tmp = neon_load_reg(rd, pass);
+                if (shift)
+                    tcg_gen_shri_i32(tmp, tmp, shift);
                 switch (size) {
                 case 0:
                     gen_st8(tmp, cpu_T[1], IS_USER(s));
@@ -3715,6 +3699,14 @@
     return 0;
 }
 
+/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
+static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
+{
+    tcg_gen_and_i32(t, t, c);
+    tcg_gen_bic_i32(f, f, c);
+    tcg_gen_or_i32(dest, t, f);
+}
+
 /* Translate a NEON data processing instruction.  Return nonzero if the
    instruction is invalid.
    In general we process vectors in 32-bit chunks.  This means we can reuse
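gen_neon_bsl computes a per-bit multiplex: each result bit comes from t where the control bit in c is set, otherwise from f. Standalone C model (bsl_model is an illustrative name):

    #include <stdint.h>

    /* dest = (t & c) | (f & ~c), bit by bit. */
    static uint32_t bsl_model(uint32_t t, uint32_t f, uint32_t c)
    {
        return (t & c) | (f & ~c);
    }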
@@ -3735,6 +3727,9 @@
     int u;
     int n;
     uint32_t imm;
+    TCGv tmp;
+    TCGv tmp2;
+    TCGv tmp3;
 
     if (!vfp_enabled(env))
         return 1;
@@ -3875,16 +3870,19 @@
             gen_op_xorl_T0_T1();
             break;
         case 5: /* VBSL */
-            NEON_GET_REG(T2, rd, pass);
-            gen_op_neon_bsl();
+            tmp = neon_load_reg(rd, pass);
+            gen_neon_bsl(cpu_T[0], cpu_T[0], cpu_T[1], tmp);
+            dead_tmp(tmp);
             break;
         case 6: /* VBIT */
-            NEON_GET_REG(T2, rd, pass);
-            gen_op_neon_bit();
+            tmp = neon_load_reg(rd, pass);
+            gen_neon_bsl(cpu_T[0], cpu_T[0], tmp, cpu_T[1]);
+            dead_tmp(tmp);
             break;
         case 7: /* VBIF */
-            NEON_GET_REG(T2, rd, pass);
-            gen_op_neon_bif();
+            tmp = neon_load_reg(rd, pass);
+            gen_neon_bsl(cpu_T[0], tmp, cpu_T[0], cpu_T[1]);
+            dead_tmp(tmp);
             break;
         }
         break;
@@ -4190,8 +4188,6 @@
            element size in bits.  */
         if (op <= 4)
             shift = shift - (1 << (size + 3));
-        else
-            shift++;
         if (size == 3) {
             count = q + 1;
         } else {
@@ -4276,9 +4272,10 @@
                 default:
                     abort();
                 }
-                NEON_GET_REG(T1, rd, pass);
-                gen_op_movl_T2_im(imm);
-                gen_op_neon_bsl();
+                tmp = neon_load_reg(rd, pass);
+                tcg_gen_andi_i32(cpu_T[0], cpu_T[0], imm);
+                tcg_gen_andi_i32(tmp, tmp, ~imm);
+                tcg_gen_or_i32(cpu_T[0], cpu_T[0], tmp);
             }
             if (size == 3) {
                 NEON_SET_REG(T0, rd, pass * 2);
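Because the control word here is a compile-time constant, the bitwise select is inlined with immediate ANDs rather than going through gen_neon_bsl. C model of the merge (name is illustrative):

    #include <stdint.h>

    /* Combine the new value with the old register contents under a
       constant mask: dest = (val & imm) | (old & ~imm). */
    static uint32_t merge_under_mask(uint32_t val, uint32_t old,
                                     uint32_t imm)
    {
        return (val & imm) | (old & ~imm);
    }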
@@ -4519,24 +4516,26 @@
             /* Avoid overlapping operands.  Wide source operands are
                always aligned so will never overlap with wide
                destinations in problematic ways.  */
-            if (rd == rm) {
-                NEON_GET_REG(T2, rm, 1);
-            } else if (rd == rn) {
-                NEON_GET_REG(T2, rn, 1);
+            if (rd == rm && !src2_wide) {
+                NEON_GET_REG(T0, rm, 1);
+                gen_neon_movl_scratch_T0(2);
+            } else if (rd == rn && !src1_wide) {
+                NEON_GET_REG(T0, rn, 1);
+                gen_neon_movl_scratch_T0(2);
             }
             for (pass = 0; pass < 2; pass++) {
                 /* Load the second operand into env->vfp.scratch.
                    Also widen narrow operands.  */
-                if (pass == 1 && rd == rm) {
-                    if (prewiden) {
-                        gen_op_movl_T0_T2();
-                    } else {
-                        gen_op_movl_T1_T2();
-                    }
+                if (src2_wide) {
+                    NEON_GET_REG(T0, rm, pass * 2);
+                    NEON_GET_REG(T1, rm, pass * 2 + 1);
                 } else {
-                    if (src2_wide) {
-                        NEON_GET_REG(T0, rm, pass * 2);
-                        NEON_GET_REG(T1, rm, pass * 2 + 1);
+                    if (pass == 1 && rd == rm) {
+                        if (prewiden) {
+                            gen_neon_movl_T0_scratch(2);
+                        } else {
+                            gen_neon_movl_T1_scratch(2);
+                        }
                     } else {
                         if (prewiden) {
                             NEON_GET_REG(T0, rm, pass);
@@ -4554,12 +4553,12 @@
                 }
 
                 /* Load the first operand.  */
-                if (pass == 1 && rd == rn) {
-                    gen_op_movl_T0_T2();
+                if (src1_wide) {
+                    NEON_GET_REG(T0, rn, pass * 2);
+                    NEON_GET_REG(T1, rn, pass * 2 + 1);
                 } else {
-                    if (src1_wide) {
-                        NEON_GET_REG(T0, rn, pass * 2);
-                        NEON_GET_REG(T1, rn, pass * 2 + 1);
+                    if (pass == 1 && rd == rn) {
+                        gen_neon_movl_T0_scratch(2);
                     } else {
                         NEON_GET_REG(T0, rn, pass);
                     }
@@ -4696,10 +4695,10 @@
             case 12: /* VQDMULH scalar */
             case 13: /* VQRDMULH scalar */
                 gen_neon_get_scalar(size, rm);
-                gen_op_movl_T2_T0();
+                gen_neon_movl_scratch_T0(0);
                 for (pass = 0; pass < (u ? 4 : 2); pass++) {
                     if (pass != 0)
-                        gen_op_movl_T0_T2();
+                        gen_neon_movl_T0_scratch(0);
                     NEON_GET_REG(T1, rn, pass);
                     if (op == 12) {
                         if (size == 1) {
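With T2 gone, values that must survive across loop passes are parked in the env->vfp.scratch slots instead of a third global register. Minimal C model of the pattern (the array stands in for the scratch file; names are illustrative):

    #include <stdint.h>

    static uint32_t scratch[4];   /* stand-in for env->vfp.scratch */

    /* gen_neon_movl_scratch_T0(n): spill the live value once... */
    static void save_scalar(int n, uint32_t t0) { scratch[n] = t0; }

    /* ...gen_neon_movl_T0_scratch(n): reload it on each later pass. */
    static uint32_t reload_scalar(int n) { return scratch[n]; }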
@@ -4764,10 +4763,10 @@
                     gen_neon_movl_scratch_T0(2);
                 }
                 gen_neon_get_scalar(size, rm);
-                gen_op_movl_T2_T0();
+                gen_neon_movl_scratch_T0(3);
                 for (pass = 0; pass < 2; pass++) {
                     if (pass != 0) {
-                        gen_op_movl_T0_T2();
+                        gen_neon_movl_T0_scratch(3);
                     }
                     if (pass != 0 && rd == rn) {
                         gen_neon_movl_T1_scratch(2);
@@ -5025,11 +5024,12 @@
             if (q)
                 return 1;
             if (rm == rd) {
-                NEON_GET_REG(T2, rm, 1);
+                NEON_GET_REG(T0, rm, 1);
+                gen_neon_movl_scratch_T0(0);
             }
             for (pass = 0; pass < 2; pass++) {
                 if (pass == 1 && rm == rd) {
-                    gen_op_movl_T0_T2();
+                    gen_neon_movl_T0_scratch(0);
                 } else {
                     NEON_GET_REG(T0, rm, pass);
                 }
@@ -5253,23 +5253,26 @@
         } else if ((insn & (1 << 10)) == 0) {
             /* VTBL, VTBX.  */
             n = (insn >> 5) & 0x18;
-            NEON_GET_REG(T1, rm, 0);
             if (insn & (1 << 6)) {
-                NEON_GET_REG(T0, rd, 0);
+                tmp = neon_load_reg(rd, 0);
             } else {
-                gen_op_movl_T0_im(0);
+                tmp = new_tmp();
+                tcg_gen_movi_i32(tmp, 0);
             }
-            gen_op_neon_tbl(rn, n);
-            gen_op_movl_T2_T0();
-            NEON_GET_REG(T1, rm, 1);
+            tmp2 = neon_load_reg(rm, 0);
+            gen_helper_neon_tbl(tmp2, tmp2, tmp, tcg_const_i32(rn),
+                                tcg_const_i32(n));
             if (insn & (1 << 6)) {
-                NEON_GET_REG(T0, rd, 0);
+                tmp = neon_load_reg(rd, 1);
             } else {
-                gen_op_movl_T0_im(0);
+                tmp = new_tmp();
+                tcg_gen_movi_i32(tmp, 0);
             }
-            gen_op_neon_tbl(rn, n);
-            NEON_SET_REG(T2, rd, 0);
-            NEON_SET_REG(T0, rd, 1);
+            tmp3 = neon_load_reg(rm, 1);
+            gen_helper_neon_tbl(tmp3, tmp3, tmp, tcg_const_i32(rn),
+                                tcg_const_i32(n));
+            neon_store_reg(rd, 0, tmp2);
+            neon_store_reg(rd, 1, tmp2);
         } else if ((insn & 0x380) == 0) {
             /* VDUP */
             if (insn & (1 << 19)) {
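The table lookup now goes through a runtime helper instead of a generated op. A rough C model of the semantics implied by the call sites (assumed for illustration, not copied from QEMU's helper): each byte of the index word selects a byte from a table of bytes taken from the rn register group, and out-of-range indexes fall back to the default word, which is the old Vd for VTBX and zero for VTBL:

    #include <stdint.h>

    static uint32_t tbl_model(uint32_t indexes, uint32_t def,
                              const uint8_t *table, unsigned tablelen)
    {
        uint32_t result = 0;
        for (int shift = 0; shift < 32; shift += 8) {
            uint32_t idx = (indexes >> shift) & 0xff;
            uint32_t byte = (idx < tablelen) ? table[idx]
                                             : (def >> shift) & 0xff;
            result |= byte << shift;
        }
        return result;
    }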
@@ -5430,7 +5433,7 @@
         switch ((insn >> 4) & 0xf) {
         case 1: /* clrex */
             ARCH(6K);
-            gen_op_clrex();
+            gen_helper_clrex(cpu_env);
             return;
         case 4: /* dsb */
         case 5: /* dmb */
@@ -5977,13 +5980,19 @@
                 /* load/store exclusive */
                 gen_movl_T1_reg(s, rn);
                 if (insn & (1 << 20)) {
-                    gen_ldst(ldlex, s);
+                    gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
+                    tmp = gen_ld32(addr, IS_USER(s));
+                    store_reg(s, rd, tmp);
                 } else {
+                    int label = gen_new_label();
                     rm = insn & 0xf;
-                    gen_movl_T0_reg(s, rm);
-                    gen_ldst(stlex, s);
+                    gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
+                    tcg_gen_brcond_i32(TCG_COND_NE, cpu_T[0],
+                                       tcg_const_i32(0), label);
+                    tmp = load_reg(s,rm);
+                    gen_st32(tmp, cpu_T[1], IS_USER(s));
+                    gen_movl_reg_T0(s, rd);
                 }
-                gen_movl_reg_T0(s, rd);
             } else {
                 /* SWP instruction */
                 rm = (insn) & 0xf;
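LDREX/STREX are now emitted as explicit mark/test helper calls plus a conditional branch around the store. A simplified, single-CPU C model of the monitor behaviour these helpers are assumed to implement (function names mirror the helpers; the logic is illustrative):

    #include <stdint.h>

    static uint32_t exclusive_addr = ~0u;

    /* gen_helper_mark_exclusive: remember the monitored address. */
    static uint32_t model_ldrex(const uint32_t *mem, uint32_t addr)
    {
        exclusive_addr = addr;
        return mem[addr / 4];
    }

    /* gen_helper_test_exclusive: a nonzero result makes the
       generated code branch past the store, so rd reports failure. */
    static uint32_t model_strex(uint32_t *mem, uint32_t addr, uint32_t val)
    {
        if (exclusive_addr != addr)
            return 1;               /* store skipped */
        mem[addr / 4] = val;
        exclusive_addr = ~0u;
        return 0;                   /* success */
    }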
@@ -6287,8 +6296,7 @@
                 }
                 if (i != 32) {
                     tmp2 = load_reg(s, rd);
-                    gen_bfi(tmp, tmp2, tmp,
-                            shift, ((1u << i) - 1) << shift);
+                    gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
                     dead_tmp(tmp2);
                 }
                 store_reg(s, rd, tmp);
@@ -6720,14 +6728,21 @@
                 }
             } else if ((insn & (1 << 23)) == 0) {
                 /* Load/store exclusive word.  */
-                gen_movl_T0_reg(s, rd);
                 gen_movl_T1_reg(s, rn);
                 if (insn & (1 << 20)) {
-                    gen_ldst(ldlex, s);
+                    gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
+                    tmp = gen_ld32(addr, IS_USER(s));
+                    store_reg(s, rd, tmp);
                 } else {
-                    gen_ldst(stlex, s);
+                    int label = gen_new_label();
+                    gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
+                    tcg_gen_brcond_i32(TCG_COND_NE, cpu_T[0],
+                                       tcg_const_i32(0), label);
+                    tmp = load_reg(s, rs);
+                    gen_st32(tmp, cpu_T[1], IS_USER(s));
+                    gen_set_label(label);
+                    gen_movl_reg_T0(s, rd);
                 }
-                gen_movl_reg_T0(s, rd);
             } else if ((insn & (1 << 6)) == 0) {
                 /* Table Branch.  */
                 if (rn == 15) {
6731 | 6746 |
} else if ((insn & (1 << 6)) == 0) { |
6732 | 6747 |
/* Table Branch. */ |
6733 | 6748 |
if (rn == 15) { |
... | ... | |
6753 | 6768 |
store_reg(s, 15, tmp); |
6754 | 6769 |
} else { |
6755 | 6770 |
/* Load/store exclusive byte/halfword/doubleword. */ |
6771 |
/* ??? These are not really atomic. However we know |
|
6772 |
we never have multiple CPUs running in parallel, |
|
6773 |
so it is good enough. */ |
|
6756 | 6774 |
op = (insn >> 4) & 0x3; |
6775 |
/* Must use a global reg for the address because we have |
|
6776 |
a conditional branch in the store instruction. */ |
|
6757 | 6777 |
gen_movl_T1_reg(s, rn); |
6778 |
addr = cpu_T[1]; |
|
6758 | 6779 |
if (insn & (1 << 20)) { |
6780 |
gen_helper_mark_exclusive(cpu_env, addr); |
|
6759 | 6781 |
switch (op) { |
6760 | 6782 |
case 0: |
6761 |
gen_ldst(ldbex, s);
|
|
6783 |
tmp = gen_ld8u(addr, IS_USER(s));
|
|
6762 | 6784 |
break; |
6763 | 6785 |
case 1: |
6764 |
gen_ldst(ldwex, s);
|
|
6786 |
tmp = gen_ld16u(addr, IS_USER(s));
|
|
6765 | 6787 |
break; |
6766 | 6788 |
case 3: |
6767 |
gen_ldst(ldqex, s); |
|
6768 |
gen_movl_reg_T1(s, rd); |
|
6789 |
tmp = gen_ld32(addr, IS_USER(s)); |
|
6790 |
tcg_gen_addi_i32(addr, addr, 4); |
|
6791 |
tmp2 = gen_ld32(addr, IS_USER(s)); |
|
6792 |
store_reg(s, rd, tmp2); |
|
6769 | 6793 |
break; |
6770 | 6794 |
default: |
6771 | 6795 |
goto illegal_op; |
6772 | 6796 |
} |
6773 |
gen_movl_reg_T0(s, rs);
|
|
6797 |
store_reg(s, rs, tmp);
|
|
6774 | 6798 |
} else { |
6775 |
gen_movl_T0_reg(s, rs); |
|
6799 |
int label = gen_new_label(); |
|
6800 |
/* Must use a global that is not killed by the branch. */ |
|
6801 |
gen_helper_test_exclusive(cpu_T[0], cpu_env, addr); |
|
6802 |
tcg_gen_brcond_i32(TCG_COND_NE, cpu_T[0], tcg_const_i32(0), |
|
6803 |
label); |
|
6804 |
tmp = load_reg(s, rs); |
|
6776 | 6805 |
switch (op) { |
6777 | 6806 |
case 0: |
6778 |
gen_ldst(stbex, s);
|
|
6807 |
gen_st8(tmp, addr, IS_USER(s));
|
|
6779 | 6808 |
break; |
6780 | 6809 |
case 1: |
6781 |
gen_ldst(stwex, s);
|
|
6810 |
gen_st16(tmp, addr, IS_USER(s));
|
|
6782 | 6811 |
break; |
6783 | 6812 |
case 3: |
6784 |
gen_movl_T2_reg(s, rd); |
|
6785 |
gen_ldst(stqex, s); |
|
6813 |
gen_st32(tmp, addr, IS_USER(s)); |
|
6814 |
tcg_gen_addi_i32(addr, addr, 4); |
|
6815 |
tmp = load_reg(s, rd); |
|
6816 |
gen_st32(tmp, addr, IS_USER(s)); |
|
6786 | 6817 |
break; |
6787 | 6818 |
default: |
6788 | 6819 |
goto illegal_op; |
6789 | 6820 |
} |
6821 |
gen_set_label(label); |
|
6790 | 6822 |
gen_movl_reg_T0(s, rm); |
6791 | 6823 |
} |
6792 | 6824 |
} |
@@ -7271,7 +7303,7 @@
             op = (insn >> 4) & 0xf;
             switch (op) {
             case 2: /* clrex */
-                gen_op_clrex();
+                gen_helper_clrex(cpu_env);
                 break;
             case 4: /* dsb */
             case 5: /* dmb */
@@ -7369,8 +7401,7 @@
                 imm = imm + 1 - shift;
                 if (imm != 32) {
                     tmp2 = load_reg(s, rd);
-                    gen_bfi(tmp, tmp2, tmp,
-                            shift, ((1u << imm) - 1) << shift);
+                    gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
                     dead_tmp(tmp2);
                 }
                 break;