Revision a7812ae4 — target-arm/translate.c (side-by-side diff view: a line prefixed by a single gutter number was removed or added in this revision; paired gutter numbers mark unchanged context)

b/target-arm/translate.c
31 31
#include "tcg-op.h"
32 32
#include "qemu-log.h"
33 33

  
34
#include "helpers.h"
34 35
#define GEN_HELPER 1
35 36
#include "helpers.h"
36 37

  
......
73 74
#define DISAS_WFI 4
74 75
#define DISAS_SWI 5
75 76

  
76
static TCGv cpu_env;
77
static TCGv_ptr cpu_env;
77 78
/* We reuse the same 64-bit temporaries for efficiency.  */
78
static TCGv cpu_V0, cpu_V1, cpu_M0;
79
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
79 80

  
80 81
/* FIXME:  These should be removed.  */
81 82
static TCGv cpu_T[2];
82
static TCGv cpu_F0s, cpu_F1s, cpu_F0d, cpu_F1d;
83
static TCGv cpu_F0s, cpu_F1s;
84
static TCGv_i64 cpu_F0d, cpu_F1d;
83 85

  
84 86
#define ICOUNT_TEMP cpu_T[0]
85 87
#include "gen-icount.h"
......
87 89
/* initialize TCG globals.  */
/* NOTE(review): rendered diff — lines with a single gutter number are
   removed (old-only) or added (new-only); paired numbers are context. */
88 90
void arm_translate_init(void)
89 91
{
90
    cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");
92
    /* added: typed TCGv_ptr constructor replaces the untyped
       tcg_global_reg_new(TCG_TYPE_PTR, ...) call removed above. */
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
93

  
94
    /* added: the T0/T1 fixed-register globals move up here and are now
       created with the typed _i32 variant. */
    cpu_T[0] = tcg_global_reg_new_i32(TCG_AREG1, "T0");
95
    cpu_T[1] = tcg_global_reg_new_i32(TCG_AREG2, "T1");
91 96

  
92
    cpu_T[0] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG1, "T0");
93
    cpu_T[1] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG2, "T1");
97
#define GEN_HELPER 2
98
/* presumably the second GEN_HELPER pass expands helpers.h into helper
   registration code — TODO confirm against the def-helper machinery. */
#include "helpers.h"
94 99
}
95 100

  
96 101
/* The code generator doesn't like lots of temporaries, so maintain our own
......
100 105
static TCGv temps[MAX_TEMPS];
101 106

  
102 107
/* Allocate a temporary variable.  */
103
static TCGv new_tmp(void)
108
/* added: return type tightened from untyped TCGv to TCGv_i32. */
static TCGv_i32 new_tmp(void)
104 109
{
105 110
    TCGv tmp;
106 111
    /* Abort outright when the temp cache is full. */
    if (num_temps == MAX_TEMPS)
107 112
        abort();
108 113

  
109
    if (GET_TCGV(temps[num_temps]))
114
    /* Reuse a previously allocated temp when this cache slot is populated
       (GET_TCGV_I32 is the typed replacement for GET_TCGV). */
    if (GET_TCGV_I32(temps[num_temps]))
110 115
      return temps[num_temps++];
111 116

  
112
    tmp = tcg_temp_new(TCG_TYPE_I32);
117
    /* Cache miss: create a fresh 32-bit temp and remember it for reuse. */
    tmp = tcg_temp_new_i32();
113 118
    temps[num_temps++] = tmp;
114 119
    return tmp;
115 120
}
......
120 125
    int i;
121 126
    num_temps--;
122 127
    i = num_temps;
123
    if (GET_TCGV(temps[i]) == GET_TCGV(tmp))
128
    if (TCGV_EQUAL(temps[i], tmp))
124 129
        return;
125 130

  
126 131
    /* Shuffle this temp to the last slot.  */
127
    while (GET_TCGV(temps[i]) != GET_TCGV(tmp))
132
    while (!TCGV_EQUAL(temps[i], tmp))
128 133
        i--;
129 134
    while (i < num_temps) {
130 135
        temps[i] = temps[i + 1];
......
324 329
/* FIXME: Most targets have native widening multiplication.
325 330
   It would be good to use that instead of a full wide multiply.  */
326 331
/* 32x32->64 multiply.  Marks inputs as dead.  */
327
static TCGv gen_mulu_i64_i32(TCGv a, TCGv b)
332
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
328 333
{
329
    TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
330
    TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
334
    TCGv_i64 tmp1 = tcg_temp_new_i64();
335
    TCGv_i64 tmp2 = tcg_temp_new_i64();
331 336

  
332 337
    tcg_gen_extu_i32_i64(tmp1, a);
333 338
    dead_tmp(a);
......
337 342
    return tmp1;
338 343
}
339 344

  
340
static TCGv gen_muls_i64_i32(TCGv a, TCGv b)
345
static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
341 346
{
342
    TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
343
    TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
347
    TCGv_i64 tmp1 = tcg_temp_new_i64();
348
    TCGv_i64 tmp2 = tcg_temp_new_i64();
344 349

  
345 350
    tcg_gen_ext_i32_i64(tmp1, a);
346 351
    dead_tmp(a);
......
353 358
/* Unsigned 32x32->64 multiply.  */
354 359
static void gen_op_mull_T0_T1(void)
355 360
{
356
    TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
357
    TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
361
    TCGv_i64 tmp1 = tcg_temp_new_i64();
362
    TCGv_i64 tmp2 = tcg_temp_new_i64();
358 363

  
359 364
    tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
360 365
    tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
......
367 372
/* Signed 32x32->64 multiply.  */
368 373
static void gen_imull(TCGv a, TCGv b)
369 374
{
370
    TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
371
    TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
375
    TCGv_i64 tmp1 = tcg_temp_new_i64();
376
    TCGv_i64 tmp2 = tcg_temp_new_i64();
372 377

  
373 378
    tcg_gen_ext_i32_i64(tmp1, a);
374 379
    tcg_gen_ext_i32_i64(tmp2, b);
......
580 585
    }
581 586
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
582 587
{
583
    TCGv tmp;
588
    TCGv_ptr tmp;
584 589

  
585 590
    switch (op1) {
586 591
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
587 592
    case 1:
588
        tmp = tcg_temp_new(TCG_TYPE_PTR);
593
        tmp = tcg_temp_new_ptr();
589 594
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
590 595
        PAS_OP(s)
591 596
        break;
592 597
    case 5:
593
        tmp = tcg_temp_new(TCG_TYPE_PTR);
598
        tmp = tcg_temp_new_ptr();
594 599
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
595 600
        PAS_OP(u)
596 601
        break;
......
625 630
    }
626 631
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
627 632
{
628
    TCGv tmp;
633
    TCGv_ptr tmp;
629 634

  
630 635
    switch (op1) {
631 636
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
632 637
    case 0:
633
        tmp = tcg_temp_new(TCG_TYPE_PTR);
638
        tmp = tcg_temp_new_ptr();
634 639
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
635 640
        PAS_OP(s)
636 641
        break;
637 642
    case 4:
638
        tmp = tcg_temp_new(TCG_TYPE_PTR);
643
        tmp = tcg_temp_new_ptr();
639 644
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
640 645
        PAS_OP(u)
641 646
        break;
......
1181 1186
    dead_tmp(var);
1182 1187
}
1183 1188

  
1184
static inline void neon_load_reg64(TCGv var, int reg)
1189
/* added: 'var' is now explicitly a 64-bit temp (TCGv_i64), matching the
   tcg_gen_ld_i64 it is handed to below. */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
1185 1190
{
    /* Load a 64-bit value into var from the VFP/Neon register file
       (offset computed by vfp_reg_offset with the 64-bit flag set). */
1186 1191
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1187 1192
}
1188 1193

  
1189
static inline void neon_store_reg64(TCGv var, int reg)
1194
/* added: 'var' typed as TCGv_i64, the counterpart of neon_load_reg64's
   signature change in this same revision. */
static inline void neon_store_reg64(TCGv_i64 var, int reg)
1190 1195
{
    /* Store the 64-bit value in var back to the VFP/Neon register file. */
1191 1196
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1192 1197
}
......
1222 1227

  
1223 1228
#define ARM_CP_RW_BIT	(1 << 20)
1224 1229

  
1225
static inline void iwmmxt_load_reg(TCGv var, int reg)
1230
/* added: iWMMXt registers are accessed with 64-bit loads, so 'var'
   becomes TCGv_i64. */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1226 1231
{
    /* Read CPUState.iwmmxt.regs[reg] into var. */
1227 1232
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1228 1233
}
1229 1234

  
1230
static inline void iwmmxt_store_reg(TCGv var, int reg)
1235
/* added: 'var' typed as TCGv_i64 to match the 64-bit store below. */
static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1231 1236
{
    /* Write var back to CPUState.iwmmxt.regs[reg]. */
1232 1237
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1233 1238
}
......
3907 3912
    tcg_gen_or_i32(dest, t, f);
3908 3913
}
3909 3914

  
3910
static inline void gen_neon_narrow(int size, TCGv dest, TCGv src)
3915
static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
3911 3916
{
3912 3917
    switch (size) {
3913 3918
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
......
3917 3922
    }
3918 3923
}
3919 3924

  
3920
static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv src)
3925
static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
3921 3926
{
3922 3927
    switch (size) {
3923 3928
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
......
3927 3932
    }
3928 3933
}
3929 3934

  
3930
static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv src)
3935
static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
3931 3936
{
3932 3937
    switch (size) {
3933 3938
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
......
3971 3976
    }
3972 3977
}
3973 3978

  
3974
static inline void gen_neon_widen(TCGv dest, TCGv src, int size, int u)
3979
static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
3975 3980
{
3976 3981
    if (u) {
3977 3982
        switch (size) {
......
4011 4016
    }
4012 4017
}
4013 4018

  
4014
static inline void gen_neon_negl(TCGv var, int size)
4019
static inline void gen_neon_negl(TCGv_i64 var, int size)
4015 4020
{
4016 4021
    switch (size) {
4017 4022
    case 0: gen_helper_neon_negl_u16(var, var); break;
......
4021 4026
    }
4022 4027
}
4023 4028

  
4024
static inline void gen_neon_addl_saturate(TCGv op0, TCGv op1, int size)
4029
static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4025 4030
{
4026 4031
    switch (size) {
4027 4032
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
......
4030 4035
    }
4031 4036
}
4032 4037

  
4033
static inline void gen_neon_mull(TCGv dest, TCGv a, TCGv b, int size, int u)
4038
static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4034 4039
{
4035
    TCGv tmp;
4040
    TCGv_i64 tmp;
4036 4041

  
4037 4042
    switch ((size << 1) | u) {
4038 4043
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
......
4076 4081
    TCGv tmp;
4077 4082
    TCGv tmp2;
4078 4083
    TCGv tmp3;
4084
    TCGv_i64 tmp64;
4079 4085

  
4080 4086
    if (!vfp_enabled(env))
4081 4087
      return 1;
......
4632 4638
                    imm = (uint16_t)shift;
4633 4639
                    imm |= imm << 16;
4634 4640
                    tmp2 = tcg_const_i32(imm);
4641
                    TCGV_UNUSED_I64(tmp64);
4635 4642
                    break;
4636 4643
                case 2:
4637 4644
                    imm = (uint32_t)shift;
4638 4645
                    tmp2 = tcg_const_i32(imm);
4646
                    TCGV_UNUSED_I64(tmp64);
4639 4647
                case 3:
4640
                    tmp2 = tcg_const_i64(shift);
4648
                    tmp64 = tcg_const_i64(shift);
4649
                    TCGV_UNUSED(tmp2);
4641 4650
                    break;
4642 4651
                default:
4643 4652
                    abort();
......
4648 4657
                        neon_load_reg64(cpu_V0, rm + pass);
4649 4658
                        if (q) {
4650 4659
                          if (u)
4651
                            gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp2);
4660
                            gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
4652 4661
                          else
4653
                            gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp2);
4662
                            gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
4654 4663
                        } else {
4655 4664
                          if (u)
4656
                            gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp2);
4665
                            gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
4657 4666
                          else
4658
                            gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp2);
4667
                            gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
4659 4668
                        }
4660 4669
                    } else {
4661 4670
                        tmp = neon_load_reg(rm + pass, 0);
......
5130 5139
                        neon_load_reg64(cpu_V1, rm);
5131 5140
                    }
5132 5141
                } else if (q) {
5133
                    tmp = tcg_temp_new(TCG_TYPE_I64);
5142
                    tmp64 = tcg_temp_new_i64();
5134 5143
                    if (imm < 8) {
5135 5144
                        neon_load_reg64(cpu_V0, rn);
5136
                        neon_load_reg64(tmp, rn + 1);
5145
                        neon_load_reg64(tmp64, rn + 1);
5137 5146
                    } else {
5138 5147
                        neon_load_reg64(cpu_V0, rn + 1);
5139
                        neon_load_reg64(tmp, rm);
5148
                        neon_load_reg64(tmp64, rm);
5140 5149
                    }
5141 5150
                    tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5142
                    tcg_gen_shli_i64(cpu_V1, tmp, 64 - ((imm & 7) * 8));
5151
                    tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5143 5152
                    tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5144 5153
                    if (imm < 8) {
5145 5154
                        neon_load_reg64(cpu_V1, rm);
......
5148 5157
                        imm -= 8;
5149 5158
                    }
5150 5159
                    tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5151
                    tcg_gen_shri_i64(tmp, tmp, imm * 8);
5152
                    tcg_gen_or_i64(cpu_V1, cpu_V1, tmp);
5160
                    tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5161
                    tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5153 5162
                } else {
5163
                    /* BUGFIX */
5154 5164
                    neon_load_reg64(cpu_V0, rn);
5155
                    tcg_gen_shri_i32(cpu_V0, cpu_V0, imm * 8);
5165
                    tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5156 5166
                    neon_load_reg64(cpu_V1, rm);
5157
                    tcg_gen_shli_i32(cpu_V1, cpu_V1, 64 - (imm * 8));
5167
                    tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5158 5168
                    tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5159 5169
                }
5160 5170
                neon_store_reg64(cpu_V0, rd);
......
5578 5588

  
5579 5589

  
5580 5590
/* Store a 64-bit value to a register pair.  Clobbers val.  */
5581
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv val)
5591
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5582 5592
{
5583 5593
    TCGv tmp;
5584 5594
    tmp = new_tmp();
......
5591 5601
}
5592 5602

  
5593 5603
/* load a 32-bit value from a register and perform a 64-bit accumulate.  */
5594
static void gen_addq_lo(DisasContext *s, TCGv val, int rlow)
5604
static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5595 5605
{
5596
    TCGv tmp;
5606
    TCGv_i64 tmp;
5597 5607
    TCGv tmp2;
5598 5608

  
5599 5609
    /* Load value and extend to 64 bits.  */
5600
    tmp = tcg_temp_new(TCG_TYPE_I64);
5610
    tmp = tcg_temp_new_i64();
5601 5611
    tmp2 = load_reg(s, rlow);
5602 5612
    tcg_gen_extu_i32_i64(tmp, tmp2);
5603 5613
    dead_tmp(tmp2);
......
5605 5615
}
5606 5616

  
5607 5617
/* load and add a 64-bit value from a register pair.  */
5608
static void gen_addq(DisasContext *s, TCGv val, int rlow, int rhigh)
5618
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5609 5619
{
5610
    TCGv tmp;
5620
    TCGv_i64 tmp;
5611 5621
    TCGv tmpl;
5612 5622
    TCGv tmph;
5613 5623

  
5614 5624
    /* Load 64-bit value rd:rn.  */
5615 5625
    tmpl = load_reg(s, rlow);
5616 5626
    tmph = load_reg(s, rhigh);
5617
    tmp = tcg_temp_new(TCG_TYPE_I64);
5627
    tmp = tcg_temp_new_i64();
5618 5628
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5619 5629
    dead_tmp(tmpl);
5620 5630
    dead_tmp(tmph);
......
5622 5632
}
5623 5633

  
5624 5634
/* Set N and Z flags from a 64-bit value.  */
5625
static void gen_logicq_cc(TCGv val)
5635
static void gen_logicq_cc(TCGv_i64 val)
5626 5636
{
5627 5637
    TCGv tmp = new_tmp();
5628 5638
    gen_helper_logicq_cc(tmp, val);
......
5637 5647
    TCGv tmp2;
5638 5648
    TCGv tmp3;
5639 5649
    TCGv addr;
5650
    TCGv_i64 tmp64;
5640 5651

  
5641 5652
    insn = ldl_code(s->pc);
5642 5653
    s->pc += 4;
......
5971 5982
                    tcg_gen_sari_i32(tmp2, tmp2, 16);
5972 5983
                else
5973 5984
                    gen_sxth(tmp2);
5974
                tmp2 = gen_muls_i64_i32(tmp, tmp2);
5975
                tcg_gen_shri_i64(tmp2, tmp2, 16);
5985
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
5986
                tcg_gen_shri_i64(tmp64, tmp64, 16);
5976 5987
                tmp = new_tmp();
5977
                tcg_gen_trunc_i64_i32(tmp, tmp2);
5988
                tcg_gen_trunc_i64_i32(tmp, tmp64);
5978 5989
                if ((sh & 2) == 0) {
5979 5990
                    tmp2 = load_reg(s, rn);
5980 5991
                    gen_helper_add_setq(tmp, tmp, tmp2);
......
5988 5999
                gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
5989 6000
                dead_tmp(tmp2);
5990 6001
                if (op1 == 2) {
5991
                    tmp2 = tcg_temp_new(TCG_TYPE_I64);
5992
                    tcg_gen_ext_i32_i64(tmp2, tmp);
6002
                    tmp64 = tcg_temp_new_i64();
6003
                    tcg_gen_ext_i32_i64(tmp64, tmp);
5993 6004
                    dead_tmp(tmp);
5994
                    gen_addq(s, tmp2, rn, rd);
5995
                    gen_storeq_reg(s, rn, rd, tmp2);
6005
                    gen_addq(s, tmp64, rn, rd);
6006
                    gen_storeq_reg(s, rn, rd, tmp64);
5996 6007
                } else {
5997 6008
                    if (op1 == 0) {
5998 6009
                        tmp2 = load_reg(s, rn);
......
6205 6216
                        tmp = load_reg(s, rs);
6206 6217
                        tmp2 = load_reg(s, rm);
6207 6218
                        if (insn & (1 << 22))
6208
                            tmp = gen_muls_i64_i32(tmp, tmp2);
6219
                            tmp64 = gen_muls_i64_i32(tmp, tmp2);
6209 6220
                        else
6210
                            tmp = gen_mulu_i64_i32(tmp, tmp2);
6221
                            tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6211 6222
                        if (insn & (1 << 21)) /* mult accumulate */
6212
                            gen_addq(s, tmp, rn, rd);
6223
                            gen_addq(s, tmp64, rn, rd);
6213 6224
                        if (!(insn & (1 << 23))) { /* double accumulate */
6214 6225
                            ARCH(6);
6215
                            gen_addq_lo(s, tmp, rn);
6216
                            gen_addq_lo(s, tmp, rd);
6226
                            gen_addq_lo(s, tmp64, rn);
6227
                            gen_addq_lo(s, tmp64, rd);
6217 6228
                        }
6218 6229
                        if (insn & (1 << 20))
6219
                            gen_logicq_cc(tmp);
6220
                        gen_storeq_reg(s, rn, rd, tmp);
6230
                            gen_logicq_cc(tmp64);
6231
                        gen_storeq_reg(s, rn, rd, tmp64);
6221 6232
                        break;
6222 6233
                    }
6223 6234
                } else {
......
6515 6526
                    tmp2 = load_reg(s, rs);
6516 6527
                    if (insn & (1 << 20)) {
6517 6528
                        /* Signed multiply most significant [accumulate].  */
6518
                        tmp2 = gen_muls_i64_i32(tmp, tmp2);
6529
                        tmp64 = gen_muls_i64_i32(tmp, tmp2);
6519 6530
                        if (insn & (1 << 5))
6520
                            tcg_gen_addi_i64(tmp2, tmp2, 0x80000000u);
6521
                        tcg_gen_shri_i64(tmp2, tmp2, 32);
6531
                            tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
6532
                        tcg_gen_shri_i64(tmp64, tmp64, 32);
6522 6533
                        tmp = new_tmp();
6523
                        tcg_gen_trunc_i64_i32(tmp, tmp2);
6534
                        tcg_gen_trunc_i64_i32(tmp, tmp64);
6524 6535
                        if (rn != 15) {
6525 6536
                            tmp2 = load_reg(s, rn);
6526 6537
                            if (insn & (1 << 6)) {
......
6544 6555
                        dead_tmp(tmp2);
6545 6556
                        if (insn & (1 << 22)) {
6546 6557
                            /* smlald, smlsld */
6547
                            tmp2 = tcg_temp_new(TCG_TYPE_I64);
6548
                            tcg_gen_ext_i32_i64(tmp2, tmp);
6558
                            tmp64 = tcg_temp_new_i64();
6559
                            tcg_gen_ext_i32_i64(tmp64, tmp);
6549 6560
                            dead_tmp(tmp);
6550
                            gen_addq(s, tmp2, rd, rn);
6551
                            gen_storeq_reg(s, rd, rn, tmp2);
6561
                            gen_addq(s, tmp64, rd, rn);
6562
                            gen_storeq_reg(s, rd, rn, tmp64);
6552 6563
                        } else {
6553 6564
                            /* smuad, smusd, smlad, smlsd */
6554 6565
                            if (rd != 15)
......
6917 6928
    TCGv tmp2;
6918 6929
    TCGv tmp3;
6919 6930
    TCGv addr;
6931
    TCGv_i64 tmp64;
6920 6932
    int op;
6921 6933
    int shiftop;
6922 6934
    int conds;
......
7393 7405
                    tcg_gen_sari_i32(tmp2, tmp2, 16);
7394 7406
                else
7395 7407
                    gen_sxth(tmp2);
7396
                tmp2 = gen_muls_i64_i32(tmp, tmp2);
7397
                tcg_gen_shri_i64(tmp2, tmp2, 16);
7408
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
7409
                tcg_gen_shri_i64(tmp64, tmp64, 16);
7398 7410
                tmp = new_tmp();
7399
                tcg_gen_trunc_i64_i32(tmp, tmp2);
7411
                tcg_gen_trunc_i64_i32(tmp, tmp64);
7400 7412
                if (rs != 15)
7401 7413
                  {
7402 7414
                    tmp2 = load_reg(s, rs);
......
7460 7472
                    tcg_gen_add_i32(tmp, tmp, tmp2);
7461 7473
                }
7462 7474
                dead_tmp(tmp2);
7463
                tmp2 = tcg_temp_new(TCG_TYPE_I64);
7464
                gen_addq(s, tmp, rs, rd);
7465
                gen_storeq_reg(s, rs, rd, tmp);
7475
                /* BUGFIX */
7476
                tmp64 = tcg_temp_new_i64();
7477
                tcg_gen_ext_i32_i64(tmp64, tmp);
7478
                dead_tmp(tmp);
7479
                gen_addq(s, tmp64, rs, rd);
7480
                gen_storeq_reg(s, rs, rd, tmp64);
7466 7481
            } else {
7467 7482
                if (op & 0x20) {
7468 7483
                    /* Unsigned 64-bit multiply  */
7469
                    tmp = gen_mulu_i64_i32(tmp, tmp2);
7484
                    tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7470 7485
                } else {
7471 7486
                    if (op & 8) {
7472 7487
                        /* smlalxy */
7473 7488
                        gen_mulxy(tmp, tmp2, op & 2, op & 1);
7474 7489
                        dead_tmp(tmp2);
7475
                        tmp2 = tcg_temp_new(TCG_TYPE_I64);
7476
                        tcg_gen_ext_i32_i64(tmp2, tmp);
7490
                        tmp64 = tcg_temp_new_i64();
7491
                        tcg_gen_ext_i32_i64(tmp64, tmp);
7477 7492
                        dead_tmp(tmp);
7478
                        tmp = tmp2;
7479 7493
                    } else {
7480 7494
                        /* Signed 64-bit multiply  */
7481
                        tmp = gen_muls_i64_i32(tmp, tmp2);
7495
                        tmp64 = gen_muls_i64_i32(tmp, tmp2);
7482 7496
                    }
7483 7497
                }
7484 7498
                if (op & 4) {
7485 7499
                    /* umaal */
7486
                    gen_addq_lo(s, tmp, rs);
7487
                    gen_addq_lo(s, tmp, rd);
7500
                    gen_addq_lo(s, tmp64, rs);
7501
                    gen_addq_lo(s, tmp64, rd);
7488 7502
                } else if (op & 0x40) {
7489 7503
                    /* 64-bit accumulate.  */
7490
                    gen_addq(s, tmp, rs, rd);
7504
                    gen_addq(s, tmp64, rs, rd);
7491 7505
                }
7492
                gen_storeq_reg(s, rs, rd, tmp);
7506
                gen_storeq_reg(s, rs, rd, tmp64);
7493 7507
            }
7494 7508
            break;
7495 7509
        }
......
8618 8632
        dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
8619 8633
    }
8620 8634
#endif
8621
    cpu_F0s = tcg_temp_new(TCG_TYPE_I32);
8622
    cpu_F1s = tcg_temp_new(TCG_TYPE_I32);
8623
    cpu_F0d = tcg_temp_new(TCG_TYPE_I64);
8624
    cpu_F1d = tcg_temp_new(TCG_TYPE_I64);
8635
    cpu_F0s = tcg_temp_new_i32();
8636
    cpu_F1s = tcg_temp_new_i32();
8637
    cpu_F0d = tcg_temp_new_i64();
8638
    cpu_F1d = tcg_temp_new_i64();
8625 8639
    cpu_V0 = cpu_F0d;
8626 8640
    cpu_V1 = cpu_F1d;
8627 8641
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
8628
    cpu_M0 = tcg_temp_new(TCG_TYPE_I64);
8642
    cpu_M0 = tcg_temp_new_i64();
8629 8643
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
8630 8644
    lj = -1;
8631 8645
    num_insns = 0;

Also available in: Unified diff