Revision 203342d8

--- a/tcg/sparc/tcg-target.c
+++ b/tcg/sparc/tcg-target.c
@@ -124,6 +124,7 @@
 #define INSN_RS2(x) (x)
 #define INSN_ASI(x) ((x) << 5)
 
+#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
 #define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
 #define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
 #define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
@@ -185,6 +186,7 @@
 #define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
 #define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
 #define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
+#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))
 
 #define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
 #define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
@@ -605,12 +607,28 @@
     tcg_out_nop(s);
 }
 
+static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGArg ret, TCGArg c1,
+                         TCGArg v1, int v1const)
+{
+    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
+              | (tcg_cond_to_rcond[cond] << 10)
+              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
+}
+
 static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGArg ret,
                                 TCGArg c1, TCGArg c2, int c2const,
                                 TCGArg v1, int v1const)
 {
-    tcg_out_cmp(s, c1, c2, c2const);
-    tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
+    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
+       Note that the immediate range is one bit smaller, so we must check
+       for that as well.  */
+    if (c2 == 0 && !is_unsigned_cond(cond)
+        && (!v1const || check_fit_tl(v1, 10))) {
+        tcg_out_movr(s, cond, ret, c1, v1, v1const);
+    } else {
+        tcg_out_cmp(s, c1, c2, c2const);
+        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
+    }
 }
 #else
 static void tcg_out_brcond2_i32(TCGContext *s, TCGCond cond,
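
The new tcg_out_movr helper and the rewritten tcg_out_movcond_i64 rely on SPARC v9's MOVR instruction, which tests the rs1 register directly against zero (the register-condition field sits at bits 12..10, hence tcg_cond_to_rcond[cond] << 10), so the cmp that MOVCC needs can be dropped whenever the comparison is a signed one against constant zero. MOVR carries only a 10-bit signed immediate, one bit narrower than MOVCC's simm11, which is what the check_fit_tl(v1, 10) test guards. A minimal, self-contained sketch of that guard follows (illustrative only, not QEMU code; fits_signed stands in for check_fit_tl, and the mnemonics in the comments assume cond == LT):

#include <stdbool.h>
#include <stdint.h>

/* Stand-in for check_fit_tl(): does v fit in a signed `bits`-bit field? */
static bool fits_signed(int64_t v, unsigned bits)
{
    int64_t max = ((int64_t)1 << (bits - 1)) - 1;
    return v >= -max - 1 && v <= max;
}

/* Guard for the movcond_i64 fast path:
 *   fast path:  movrlz c1, v1, ret
 *   slow path:  cmp c1, c2 ; movl %xcc, v1, ret
 */
static bool movcond_can_use_movr(bool cond_is_signed, int64_t c2,
                                 bool v1const, int64_t v1)
{
    return c2 == 0 && cond_is_signed && (!v1const || fits_signed(v1, 10));
}
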
@@ -706,9 +724,16 @@
 static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGArg ret,
                                 TCGArg c1, TCGArg c2, int c2const)
 {
-    tcg_out_cmp(s, c1, c2, c2const);
-    tcg_out_movi_imm13(s, ret, 0);
-    tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
+    /* For 64-bit signed comparisons vs zero, we can avoid the compare
+       if the input does not overlap the output.  */
+    if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
+        tcg_out_movi_imm13(s, ret, 0);
+        tcg_out_movr(s, cond, ret, c1, 1, 1);
+    } else {
+        tcg_out_cmp(s, c1, c2, c2const);
+        tcg_out_movi_imm13(s, ret, 0);
+        tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
+    }
 }
 #else
 static void tcg_out_setcond2_i32(TCGContext *s, TCGCond cond, TCGArg ret,
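
tcg_out_setcond_i64 applies the same idea, but its sequence zeroes ret before the MOVR reads c1, so the fast path must also require that c1 and ret are different registers; otherwise the zeroing would clobber the value being tested, which is what the comment about the input overlapping the output refers to. A sketch of that guard, under the same assumptions and illustrative names as above:

/* Guard for the setcond_i64 fast path:
 *   fast path:  mov 0, ret ; movrlz c1, 1, ret
 *   slow path:  cmp c1, c2 ; mov 0, ret ; movl %xcc, 1, ret
 */
static bool setcond_can_use_movr(bool cond_is_signed, int64_t c2,
                                 unsigned c1_reg, unsigned ret_reg)
{
    return c2 == 0 && cond_is_signed && c1_reg != ret_reg;
}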
