Revision 0df5bdbe

b/target-ppc/op.c
@@ -1153,33 +1153,6 @@
     RETURN();
 }
 
-/* andi. */
-void OPPROTO op_andi_T0 (void)
-{
-    T0 &= (uint32_t)PARAM1;
-    RETURN();
-}
-
-void OPPROTO op_andi_T1 (void)
-{
-    T1 &= (uint32_t)PARAM1;
-    RETURN();
-}
-
-#if defined(TARGET_PPC64)
-void OPPROTO op_andi_T0_64 (void)
-{
-    T0 &= ((uint64_t)PARAM1 << 32) | (uint64_t)PARAM2;
-    RETURN();
-}
-
-void OPPROTO op_andi_T1_64 (void)
-{
-    T1 &= ((uint64_t)PARAM1 << 32) | (uint64_t)PARAM2;
-    RETURN();
-}
-#endif
-
 /* count leading zero */
 void OPPROTO op_cntlzw (void)
 {
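
For context, the four micro-ops deleted above only ANDed T0/T1 with an
immediate; on TARGET_PPC64 the immediate had to travel as two 32-bit dyngen
parameters (PARAM1/PARAM2) and be glued back together at run time. A minimal
standalone sketch of that reassembly (plain host C for illustration, not QEMU
code) shows why a single full-width immediate makes the extra ops redundant:

    #include <inttypes.h>
    #include <stdio.h>
    #include <assert.h>

    int main(void)
    {
        /* A 64-bit AND mask, as the removed op_andi_T0_64 ultimately saw it. */
        uint64_t mask = 0xFFFFFFFF00000FFFULL;

        /* The translator had to split it into two 32-bit parameters... */
        uint32_t param1 = (uint32_t)(mask >> 32);        /* high half */
        uint32_t param2 = (uint32_t)(mask & 0xFFFFFFFF); /* low half  */

        /* ...and the micro-op rebuilt it: ((uint64_t)PARAM1 << 32) | PARAM2. */
        uint64_t rebuilt = ((uint64_t)param1 << 32) | (uint64_t)param2;
        assert(rebuilt == mask);

        /* tcg_gen_andi_tl() takes the full-width constant directly, so the
         * split/rebuild round trip disappears along with the micro-ops. */
        uint64_t t0 = 0x123456789ABCDEF0ULL;
        printf("%016" PRIx64 "\n", t0 & rebuilt);
        return 0;
    }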
b/target-ppc/translate.c
@@ -1235,7 +1235,7 @@
 GEN_HANDLER2(andi_, "andi.", 0x1C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
 {
     tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
-    gen_op_andi_T0(UIMM(ctx->opcode));
+    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], UIMM(ctx->opcode));
     tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
     gen_set_Rc0(ctx);
 }
@@ -1243,7 +1243,7 @@
 GEN_HANDLER2(andis_, "andis.", 0x1D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
 {
     tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
-    gen_op_andi_T0(UIMM(ctx->opcode) << 16);
+    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], UIMM(ctx->opcode) << 16);
     tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
     gen_set_Rc0(ctx);
 }
@@ -1458,8 +1458,8 @@
     me += 32;
 #endif
     mask = MASK(mb, me);
-    gen_op_andi_T0(mask);
-    gen_op_andi_T1(~mask);
+    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], mask);
+    tcg_gen_andi_tl(cpu_T[1], cpu_T[1], ~mask);
     gen_op_or();
  do_store:
     tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
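
The two tcg_gen_andi_tl() calls in the hunk above, followed by gen_op_or(),
compute the usual insert-under-mask merge (T0 & mask) | (T1 & ~mask), with
MASK(mb, me) supplying a PowerPC-style mask of ones from bit mb through bit
me. As an illustration only (standalone C; ppc_mask32() is a hypothetical
stand-in, not the MASK() macro from translate.c), the same merge looks like
this for 32-bit values with IBM bit numbering (bit 0 = MSB):

    #include <inttypes.h>
    #include <stdio.h>

    /* Ones from IBM bit mb through IBM bit me, wrapping when mb > me. */
    static uint32_t ppc_mask32(unsigned mb, unsigned me)
    {
        uint32_t head = UINT32_MAX >> mb;         /* bits mb..31 set */
        uint32_t tail = UINT32_MAX << (31 - me);  /* bits 0..me set  */
        return (mb <= me) ? (head & tail) : (head | tail);
    }

    int main(void)
    {
        uint32_t rotated = 0xAAAAAAAA;  /* rotated source, like T0      */
        uint32_t old_ra  = 0x12345678;  /* previous rA value, like T1   */
        uint32_t mask    = ppc_mask32(8, 15);

        /* Insert-under-mask: keep 'rotated' where mask is 1, 'old_ra'
         * everywhere else. */
        uint32_t result = (rotated & mask) | (old_ra & ~mask);

        printf("mask=%08" PRIx32 " result=%08" PRIx32 "\n", mask, result);
        return 0;
    }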
@@ -1498,7 +1498,7 @@
     mb += 32;
     me += 32;
 #endif
-    gen_op_andi_T0(MASK(mb, me));
+    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], MASK(mb, me));
  do_store:
     tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
@@ -1519,7 +1519,7 @@
         mb += 32;
         me += 32;
 #endif
-        gen_op_andi_T0(MASK(mb, me));
+        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], MASK(mb, me));
     }
     tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
@@ -1558,22 +1558,6 @@
     gen_##name(ctx, 1, 1);                                                    \
 }
 
-static always_inline void gen_andi_T0_64 (DisasContext *ctx, uint64_t mask)
-{
-    if (mask >> 32)
-        gen_op_andi_T0_64(mask >> 32, mask & 0xFFFFFFFF);
-    else
-        gen_op_andi_T0(mask);
-}
-
-static always_inline void gen_andi_T1_64 (DisasContext *ctx, uint64_t mask)
-{
-    if (mask >> 32)
-        gen_op_andi_T1_64(mask >> 32, mask & 0xFFFFFFFF);
-    else
-        gen_op_andi_T1(mask);
-}
-
 static always_inline void gen_rldinm (DisasContext *ctx, uint32_t mb,
                                       uint32_t me, uint32_t sh)
 {
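
The gen_andi_T0_64()/gen_andi_T1_64() wrappers removed above existed only to
pick between the 32-bit micro-op and the split-halves 64-bit one; with
tcg_gen_andi_tl() the immediate is a full target_ulong, so one call covers
both cases. A small standalone check of that equivalence (illustrative host
C; and_via_old_wrapper() is a hypothetical model of the removed dispatch, not
QEMU code):

    #include <inttypes.h>
    #include <stdio.h>

    /* Model of the removed dispatch: 32-bit AND when the high half of the
     * mask is zero, otherwise a 64-bit AND rebuilt from two 32-bit halves. */
    static uint64_t and_via_old_wrapper(uint64_t t0, uint64_t mask)
    {
        if (mask >> 32)
            return t0 & (((uint64_t)(uint32_t)(mask >> 32) << 32) |
                         (uint32_t)(mask & 0xFFFFFFFF));
        return t0 & (uint32_t)mask;
    }

    int main(void)
    {
        uint64_t t0 = 0xFEDCBA9876543210ULL;
        uint64_t masks[] = { 0x00000000FFFF0000ULL, 0xFF000000000000FFULL };

        for (int i = 0; i < 2; i++) {
            /* The single full-width AND the new code computes... */
            uint64_t direct = t0 & masks[i];
            /* ...matches what the removed two-path dispatch produced. */
            uint64_t legacy = and_via_old_wrapper(t0, masks[i]);
            printf("mask=%016" PRIx64 ": direct=%016" PRIx64 " legacy=%016" PRIx64 "\n",
                   masks[i], direct, legacy);
        }
        return 0;
    }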
@@ -1597,7 +1581,7 @@
     }
     gen_op_rotli64_T0(sh);
  do_mask:
-    gen_andi_T0_64(ctx, MASK(mb, me));
+    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], MASK(mb, me));
  do_store:
     tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
@@ -1641,7 +1625,7 @@
     tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
     gen_op_rotl64_T0_T1();
     if (unlikely(mb != 0 || me != 63)) {
-        gen_andi_T0_64(ctx, MASK(mb, me));
+        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], MASK(mb, me));
     }
     tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
@@ -1689,8 +1673,8 @@
     gen_op_rotli64_T0(sh);
  do_mask:
     mask = MASK(mb, me);
-    gen_andi_T0_64(ctx, mask);
-    gen_andi_T1_64(ctx, ~mask);
+    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], mask);
+    tcg_gen_andi_tl(cpu_T[1], cpu_T[1], ~mask);
     gen_op_or();
  do_store:
     tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
@@ -3107,7 +3091,7 @@
         gen_op_sli_T1(-sh);                                                   \
     gen_op_##op();                                                            \
     bitmask = 1 << (3 - (crbD(ctx->opcode) & 0x03));                          \
-    gen_op_andi_T0(bitmask);                                                  \
+    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], bitmask);                             \
     tcg_gen_andi_i32(cpu_T[1], cpu_crf[crbD(ctx->opcode) >> 2], ~bitmask);    \
     gen_op_or();                                                              \
     tcg_gen_andi_i32(cpu_crf[crbD(ctx->opcode) >> 2], cpu_T[0], 0xf);         \
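
In the last hunk the only change is again the AND: bitmask selects one bit
inside the 4-bit CR field held in cpu_crf[crbD >> 2], and the surrounding
masking (note the final "& 0xf") clears and re-inserts just that bit. A
standalone walk through the index arithmetic (illustration only, not QEMU
code):

    #include <stdio.h>

    int main(void)
    {
        /* crbD names one of the 32 CR bits: crbD >> 2 picks the CR field,
         * crbD & 3 the bit within it, stored so that bit 0 of the field
         * lands in bit 3 of the 4-bit value, hence 1 << (3 - (crbD & 3)). */
        for (int crbD = 0; crbD < 8; crbD++) {
            int field   = crbD >> 2;
            int bitmask = 1 << (3 - (crbD & 0x03));
            printf("crbD=%2d -> cpu_crf[%d], bitmask=0x%x\n",
                   crbD, field, bitmask);
        }
        return 0;
    }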
