Revision 7d1b0095

b/target-arm/translate.c
 #include "helpers.h"
 }
 
-static int num_temps;
-
-/* Allocate a temporary variable.  */
-static TCGv_i32 new_tmp(void)
-{
-    num_temps++;
-    return tcg_temp_new_i32();
-}
-
-/* Release a temporary variable.  */
-static void dead_tmp(TCGv tmp)
-{
-    tcg_temp_free(tmp);
-    num_temps--;
-}
-
 static inline TCGv load_cpu_offset(int offset)
 {
-    TCGv tmp = new_tmp();
+    TCGv tmp = tcg_temp_new_i32();
     tcg_gen_ld_i32(tmp, cpu_env, offset);
     return tmp;
 }
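
Note: the wrapper pair deleted above existed only to keep num_temps balanced so the translator could flag temporaries that were allocated but never freed; every hunk that follows simply substitutes the underlying tcg_temp_new_i32()/tcg_temp_free_i32() calls. A minimal runnable C sketch of that counting pattern is below; the actual leak check lives elsewhere in translate.c and is not visible in this truncated diff, so the check function and its message here are illustrative assumptions, not the removed code.

#include <stdio.h>

/* Sketch of the counting pattern being removed (names follow the old code;
 * the int return stands in for a TCGv_i32 handle). */
static int num_temps;

static int new_tmp(void)
{
    num_temps++;             /* one more live temporary */
    return 0;
}

static void dead_tmp(int tmp)
{
    (void)tmp;
    num_temps--;             /* one fewer live temporary */
}

/* Hypothetical per-instruction check that such a counter enables. */
static void check_for_leaks(unsigned int pc)
{
    if (num_temps) {
        fprintf(stderr, "temp leak before %08x: %d live\n", pc, num_temps);
        num_temps = 0;       /* reset so one leak does not cascade */
    }
}

int main(void)
{
    int t = new_tmp();       /* translate one instruction... */
    dead_tmp(t);             /* ...and release its scratch value */
    check_for_leaks(0x8000); /* balanced, so nothing is printed */
    return 0;
}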
......
 static inline void store_cpu_offset(TCGv var, int offset)
 {
     tcg_gen_st_i32(var, cpu_env, offset);
-    dead_tmp(var);
+    tcg_temp_free_i32(var);
 }
 
 #define store_cpu_field(var, name) \
......
 /* Create a new temporary and set it to the value of a CPU register.  */
 static inline TCGv load_reg(DisasContext *s, int reg)
 {
-    TCGv tmp = new_tmp();
+    TCGv tmp = tcg_temp_new_i32();
     load_reg_var(s, tmp, reg);
     return tmp;
 }
......
         s->is_jmp = DISAS_JUMP;
     }
     tcg_gen_mov_i32(cpu_R[reg], var);
-    dead_tmp(var);
+    tcg_temp_free_i32(var);
 }
 
 /* Value extensions.  */
......
 
 static void gen_exception(int excp)
 {
-    TCGv tmp = new_tmp();
+    TCGv tmp = tcg_temp_new_i32();
     tcg_gen_movi_i32(tmp, excp);
     gen_helper_exception(tmp);
-    dead_tmp(tmp);
+    tcg_temp_free_i32(tmp);
 }
 
 static void gen_smul_dual(TCGv a, TCGv b)
 {
-    TCGv tmp1 = new_tmp();
-    TCGv tmp2 = new_tmp();
+    TCGv tmp1 = tcg_temp_new_i32();
+    TCGv tmp2 = tcg_temp_new_i32();
     tcg_gen_ext16s_i32(tmp1, a);
     tcg_gen_ext16s_i32(tmp2, b);
     tcg_gen_mul_i32(tmp1, tmp1, tmp2);
-    dead_tmp(tmp2);
+    tcg_temp_free_i32(tmp2);
     tcg_gen_sari_i32(a, a, 16);
     tcg_gen_sari_i32(b, b, 16);
     tcg_gen_mul_i32(b, b, a);
     tcg_gen_mov_i32(a, tmp1);
-    dead_tmp(tmp1);
+    tcg_temp_free_i32(tmp1);
 }
 
 /* Byteswap each halfword.  */
 static void gen_rev16(TCGv var)
 {
-    TCGv tmp = new_tmp();
+    TCGv tmp = tcg_temp_new_i32();
     tcg_gen_shri_i32(tmp, var, 8);
     tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
     tcg_gen_shli_i32(var, var, 8);
     tcg_gen_andi_i32(var, var, 0xff00ff00);
     tcg_gen_or_i32(var, var, tmp);
-    dead_tmp(tmp);
+    tcg_temp_free_i32(tmp);
 }
 
 /* Byteswap low halfword and sign extend.  */
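
As a reference for the gen_rev16 hunk above, here is a plain-C model of the same shift/mask/or sequence the TCG ops emit (a sketch for illustration, not QEMU code):

#include <stdint.h>
#include <stdio.h>

/* Byteswap each 16-bit halfword of a 32-bit value, mirroring gen_rev16:
 * the high byte of each half moves down, the low byte of each half moves up. */
static uint32_t rev16(uint32_t x)
{
    uint32_t down = (x >> 8) & 0x00ff00ffu;
    uint32_t up   = (x << 8) & 0xff00ff00u;
    return down | up;
}

int main(void)
{
    printf("%08x\n", rev16(0xaabbccddu));   /* prints bbaaddcc */
    return 0;
}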
......
     TCGv_i64 tmp64 = tcg_temp_new_i64();
 
     tcg_gen_extu_i32_i64(tmp64, b);
-    dead_tmp(b);
+    tcg_temp_free_i32(b);
     tcg_gen_shli_i64(tmp64, tmp64, 32);
     tcg_gen_add_i64(a, tmp64, a);
 
......
     TCGv_i64 tmp64 = tcg_temp_new_i64();
 
     tcg_gen_extu_i32_i64(tmp64, b);
-    dead_tmp(b);
+    tcg_temp_free_i32(b);
     tcg_gen_shli_i64(tmp64, tmp64, 32);
     tcg_gen_sub_i64(a, tmp64, a);
 
......
     TCGv_i64 tmp2 = tcg_temp_new_i64();
 
     tcg_gen_extu_i32_i64(tmp1, a);
-    dead_tmp(a);
+    tcg_temp_free_i32(a);
     tcg_gen_extu_i32_i64(tmp2, b);
-    dead_tmp(b);
+    tcg_temp_free_i32(b);
     tcg_gen_mul_i64(tmp1, tmp1, tmp2);
     tcg_temp_free_i64(tmp2);
     return tmp1;
......
     TCGv_i64 tmp2 = tcg_temp_new_i64();
 
     tcg_gen_ext_i32_i64(tmp1, a);
-    dead_tmp(a);
+    tcg_temp_free_i32(a);
     tcg_gen_ext_i32_i64(tmp2, b);
-    dead_tmp(b);
+    tcg_temp_free_i32(b);
     tcg_gen_mul_i64(tmp1, tmp1, tmp2);
     tcg_temp_free_i64(tmp2);
     return tmp1;
......
 /* Swap low and high halfwords.  */
 static void gen_swap_half(TCGv var)
 {
-    TCGv tmp = new_tmp();
+    TCGv tmp = tcg_temp_new_i32();
     tcg_gen_shri_i32(tmp, var, 16);
     tcg_gen_shli_i32(var, var, 16);
     tcg_gen_or_i32(var, var, tmp);
-    dead_tmp(tmp);
+    tcg_temp_free_i32(tmp);
 }
 
 /* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
......
 
 static void gen_add16(TCGv t0, TCGv t1)
 {
-    TCGv tmp = new_tmp();
+    TCGv tmp = tcg_temp_new_i32();
     tcg_gen_xor_i32(tmp, t0, t1);
     tcg_gen_andi_i32(tmp, tmp, 0x8000);
     tcg_gen_andi_i32(t0, t0, ~0x8000);
     tcg_gen_andi_i32(t1, t1, ~0x8000);
     tcg_gen_add_i32(t0, t0, t1);
     tcg_gen_xor_i32(t0, t0, tmp);
-    dead_tmp(tmp);
-    dead_tmp(t1);
+    tcg_temp_free_i32(tmp);
+    tcg_temp_free_i32(t1);
 }
 
 #define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
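
The gen_add16 hunk above implements a dual 16-bit add by masking bit 15 so that a carry out of the low halfword cannot propagate into the high halfword; the final XOR restores the correct bit 15. A plain-C model of the same trick (illustration only):

#include <stdint.h>
#include <stdio.h>

/* Add two packed 16-bit lanes in one 32-bit add, as gen_add16 does:
 * clear bit 15 in both operands so the add cannot carry into bit 16,
 * then XOR back the carry-less sum bit for bit 15. */
static uint32_t add16_dual(uint32_t a, uint32_t b)
{
    uint32_t bit15 = (a ^ b) & 0x8000u;             /* sum bit 15 ignoring carry */
    uint32_t sum = (a & ~0x8000u) + (b & ~0x8000u);
    return sum ^ bit15;
}

int main(void)
{
    /* lanes: 0x0001+0x0001 = 0x0002 and 0xffff+0x0001 = 0x0000 (wraps in-lane) */
    printf("%08x\n", add16_dual(0x0001ffffu, 0x00010001u));  /* prints 00020000 */
    /* a naive 32-bit add would give 00030000: the low-lane carry leaks upward */
    return 0;
}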
......
 /* Set CF to the top bit of var.  */
 static void gen_set_CF_bit31(TCGv var)
 {
-    TCGv tmp = new_tmp();
+    TCGv tmp = tcg_temp_new_i32();
     tcg_gen_shri_i32(tmp, var, 31);
     gen_set_CF(tmp);
-    dead_tmp(tmp);
+    tcg_temp_free_i32(tmp);
 }
 
 /* Set N and Z flags from var.  */
......
     tcg_gen_add_i32(t0, t0, t1);
     tmp = load_cpu_field(CF);
     tcg_gen_add_i32(t0, t0, tmp);
-    dead_tmp(tmp);
+    tcg_temp_free_i32(tmp);
 }
 
 /* dest = T0 + T1 + CF. */
......
     tcg_gen_add_i32(dest, t0, t1);
     tmp = load_cpu_field(CF);
     tcg_gen_add_i32(dest, dest, tmp);
-    dead_tmp(tmp);
+    tcg_temp_free_i32(tmp);
 }
 
 /* dest = T0 - T1 + CF - 1.  */
......
     tmp = load_cpu_field(CF);
     tcg_gen_add_i32(dest, dest, tmp);
     tcg_gen_subi_i32(dest, dest, 1);
-    dead_tmp(tmp);
+    tcg_temp_free_i32(tmp);
 }
 
 /* FIXME:  Implement this natively.  */
......
 
 static void shifter_out_im(TCGv var, int shift)
 {
-    TCGv tmp = new_tmp();
+    TCGv tmp = tcg_temp_new_i32();
     if (shift == 0) {
         tcg_gen_andi_i32(tmp, var, 1);
     } else {
......
             tcg_gen_andi_i32(tmp, tmp, 1);
     }
     gen_set_CF(tmp);
-    dead_tmp(tmp);
+    tcg_temp_free_i32(tmp);
 }
 
 /* Shift by immediate.  Includes special handling for shift == 0.  */
......
             tcg_gen_shri_i32(var, var, 1);
             tcg_gen_shli_i32(tmp, tmp, 31);
             tcg_gen_or_i32(var, var, tmp);
-            dead_tmp(tmp);
+            tcg_temp_free_i32(tmp);
         }
     }
 };
......
                 tcg_gen_rotr_i32(var, var, shift); break;
         }
     }
-    dead_tmp(shift);
+    tcg_temp_free_i32(shift);
 }
 
 #define PAS_OP(pfx) \
......
         inv = gen_new_label();
         tmp = load_cpu_field(CF);
         tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
-        dead_tmp(tmp);
+        tcg_temp_free_i32(tmp);
         tmp = load_cpu_field(ZF);
         tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
         gen_set_label(inv);
......
     case 9: /* ls: !C || Z */
         tmp = load_cpu_field(CF);
         tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
-        dead_tmp(tmp);
+        tcg_temp_free_i32(tmp);
         tmp = load_cpu_field(ZF);
         tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
         break;
......
         tmp = load_cpu_field(VF);
         tmp2 = load_cpu_field(NF);
         tcg_gen_xor_i32(tmp, tmp, tmp2);
-        dead_tmp(tmp2);
+        tcg_temp_free_i32(tmp2);
         tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
         break;
     case 11: /* lt: N != V -> N ^ V != 0 */
         tmp = load_cpu_field(VF);
         tmp2 = load_cpu_field(NF);
         tcg_gen_xor_i32(tmp, tmp, tmp2);
-        dead_tmp(tmp2);
+        tcg_temp_free_i32(tmp2);
         tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
         break;
     case 12: /* gt: !Z && N == V */
         inv = gen_new_label();
         tmp = load_cpu_field(ZF);
         tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
-        dead_tmp(tmp);
+        tcg_temp_free_i32(tmp);
         tmp = load_cpu_field(VF);
         tmp2 = load_cpu_field(NF);
         tcg_gen_xor_i32(tmp, tmp, tmp2);
-        dead_tmp(tmp2);
+        tcg_temp_free_i32(tmp2);
         tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
         gen_set_label(inv);
         break;
     case 13: /* le: Z || N != V */
         tmp = load_cpu_field(ZF);
         tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
-        dead_tmp(tmp);
+        tcg_temp_free_i32(tmp);
         tmp = load_cpu_field(VF);
         tmp2 = load_cpu_field(NF);
         tcg_gen_xor_i32(tmp, tmp, tmp2);
-        dead_tmp(tmp2);
+        tcg_temp_free_i32(tmp2);
         tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
         break;
     default:
         fprintf(stderr, "Bad condition code 0x%x\n", cc);
         abort();
     }
-    dead_tmp(tmp);
+    tcg_temp_free_i32(tmp);
 }
 
 static const uint8_t table_logic_cc[16] = {
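
For reference, the flag layout assumed by the condition-code hunks above is the one translate.c uses throughout: Z is encoded as ZF == 0, while N, V and the XOR used for N != V are tested via bit 31, and CF is tested against zero. A plain-C model of the "le" case exactly as the branches encode it (a sketch, not QEMU code):

#include <stdint.h>
#include <stdio.h>

/* le: Z || N != V, as the pair of brconds above encodes it. */
static int cond_le(uint32_t zf, uint32_t nf, uint32_t vf)
{
    if (zf == 0)
        return 1;                       /* Z set */
    return (int32_t)(nf ^ vf) < 0;      /* sign bit set means N != V */
}

int main(void)
{
    printf("%d %d %d\n",
           cond_le(0, 0, 0),              /* Z set         -> 1 */
           cond_le(1, 0x80000000u, 0),    /* N != V        -> 1 */
           cond_le(1, 0, 0));             /* !Z and N == V -> 0 */
    return 0;
}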
......
 
     s->is_jmp = DISAS_UPDATE;
     if (s->thumb != (addr & 1)) {
-        tmp = new_tmp();
+        tmp = tcg_temp_new_i32();
         tcg_gen_movi_i32(tmp, addr & 1);
         tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
-        dead_tmp(tmp);
+        tcg_temp_free_i32(tmp);
     }
     tcg_gen_movi_i32(cpu_R[15], addr & ~1);
 }
......
 
 static inline TCGv gen_ld8s(TCGv addr, int index)
 {
-    TCGv tmp = new_tmp();
+    TCGv tmp = tcg_temp_new_i32();
     tcg_gen_qemu_ld8s(tmp, addr, index);
     return tmp;
 }
 static inline TCGv gen_ld8u(TCGv addr, int index)
 {
-    TCGv tmp = new_tmp();
+    TCGv tmp = tcg_temp_new_i32();
     tcg_gen_qemu_ld8u(tmp, addr, index);
     return tmp;
 }
 static inline TCGv gen_ld16s(TCGv addr, int index)
 {
-    TCGv tmp = new_tmp();
+    TCGv tmp = tcg_temp_new_i32();
     tcg_gen_qemu_ld16s(tmp, addr, index);
     return tmp;
 }
 static inline TCGv gen_ld16u(TCGv addr, int index)
 {
-    TCGv tmp = new_tmp();
+    TCGv tmp = tcg_temp_new_i32();
     tcg_gen_qemu_ld16u(tmp, addr, index);
     return tmp;
 }
 static inline TCGv gen_ld32(TCGv addr, int index)
 {
-    TCGv tmp = new_tmp();
+    TCGv tmp = tcg_temp_new_i32();
     tcg_gen_qemu_ld32u(tmp, addr, index);
     return tmp;
 }
......
 static inline void gen_st8(TCGv val, TCGv addr, int index)
 {
     tcg_gen_qemu_st8(val, addr, index);
-    dead_tmp(val);
+    tcg_temp_free_i32(val);
 }
 static inline void gen_st16(TCGv val, TCGv addr, int index)
 {
     tcg_gen_qemu_st16(val, addr, index);
-    dead_tmp(val);
+    tcg_temp_free_i32(val);
 }
 static inline void gen_st32(TCGv val, TCGv addr, int index)
 {
     tcg_gen_qemu_st32(val, addr, index);
-    dead_tmp(val);
+    tcg_temp_free_i32(val);
 }
 static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
 {
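
The gen_ld*/gen_st* helpers above follow one ownership rule that this whole revision preserves: a load allocates and returns a temp the caller must eventually free, and a store consumes (frees) the value temp passed to it. The malloc/free analogy below is only an illustration of that contract, not QEMU code:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint32_t *load32(uint32_t value)   /* like gen_ld32: allocates for the caller */
{
    uint32_t *p = malloc(sizeof *p);
    *p = value;
    return p;
}

static void store32(uint32_t *p, uint32_t *dest)  /* like gen_st32: consumes its input */
{
    *dest = *p;
    free(p);                              /* caller must not touch p afterwards */
}

int main(void)
{
    uint32_t mem = 0;
    uint32_t *t = load32(42);   /* caller owns t */
    *t += 1;                    /* operate in place */
    store32(t, &mem);           /* ownership handed off; t is released */
    printf("%u\n", mem);        /* prints 43 */
    return 0;
}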
......
             tcg_gen_sub_i32(var, var, offset);
         else
             tcg_gen_add_i32(var, var, offset);
-        dead_tmp(offset);
+        tcg_temp_free_i32(offset);
     }
 }
 
......
             tcg_gen_sub_i32(var, var, offset);
         else
             tcg_gen_add_i32(var, var, offset);
-        dead_tmp(offset);
+        tcg_temp_free_i32(offset);
     }
 }
 
......
 
 static TCGv neon_load_reg(int reg, int pass)
 {
-    TCGv tmp = new_tmp();
+    TCGv tmp = tcg_temp_new_i32();
     tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
     return tmp;
 }
......
 static void neon_store_reg(int reg, int pass, TCGv var)
 {
     tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
-    dead_tmp(var);
+    tcg_temp_free_i32(var);
 }
 
 static inline void neon_load_reg64(TCGv_i64 var, int reg)
......
 
 static inline TCGv iwmmxt_load_creg(int reg)
 {
-    TCGv var = new_tmp();
+    TCGv var = tcg_temp_new_i32();
     tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
     return var;
 }
......
 static inline void iwmmxt_store_creg(int reg, TCGv var)
 {
     tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
-    dead_tmp(var);
+    tcg_temp_free_i32(var);
 }
 
 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
......
 
 static void gen_op_iwmmxt_setpsr_nz(void)
 {
-    TCGv tmp = new_tmp();
+    TCGv tmp = tcg_temp_new_i32();
     gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
     store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
 }
......
         if (insn & (1 << 21))
             store_reg(s, rd, tmp);
         else
-            dead_tmp(tmp);
+            tcg_temp_free_i32(tmp);
     } else if (insn & (1 << 21)) {
         /* Post indexed */
         tcg_gen_mov_i32(dest, tmp);
......
             tmp = iwmmxt_load_creg(rd);
         }
     } else {
-        tmp = new_tmp();
+        tmp = tcg_temp_new_i32();
         iwmmxt_load_reg(cpu_V0, rd);
         tcg_gen_trunc_i64_i32(tmp, cpu_V0);
     }
     tcg_gen_andi_i32(tmp, tmp, mask);
     tcg_gen_mov_i32(dest, tmp);
-    dead_tmp(tmp);
+    tcg_temp_free_i32(tmp);
     return 0;
 }
 
......
         }
 
         wrd = (insn >> 12) & 0xf;
-        addr = new_tmp();
+        addr = tcg_temp_new_i32();
         if (gen_iwmmxt_address(s, insn, addr)) {
-            dead_tmp(addr);
+            tcg_temp_free_i32(addr);
             return 1;
         }
         if (insn & ARM_CP_RW_BIT) {
             if ((insn >> 28) == 0xf) {			/* WLDRW wCx */
-                tmp = new_tmp();
+                tmp = tcg_temp_new_i32();
                 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                 iwmmxt_store_creg(wrd, tmp);
             } else {
......
                 }
                 if (i) {
                     tcg_gen_extu_i32_i64(cpu_M0, tmp);
-                    dead_tmp(tmp);
+                    tcg_temp_free_i32(tmp);
                 }
                 gen_op_iwmmxt_movq_wRn_M0(wrd);
             }
......
                 gen_st32(tmp, addr, IS_USER(s));
             } else {
                 gen_op_iwmmxt_movq_M0_wRn(wrd);
-                tmp = new_tmp();
+                tmp = tcg_temp_new_i32();
                 if (insn & (1 << 8)) {
                     if (insn & (1 << 22)) {		/* WSTRD */
-                        dead_tmp(tmp);
+                        tcg_temp_free_i32(tmp);
                         tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
                     } else {				/* WSTRW wRd */
                         tcg_gen_trunc_i64_i32(tmp, cpu_M0);
......
                 }
             }
         }
-        dead_tmp(addr);
+        tcg_temp_free_i32(addr);
         return 0;
     }
 
......
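
A detail worth noting in the iwmmxt load/store hunk above: the error path frees addr before the early return, and the WSTRD case frees tmp as soon as it turns out to be unneeded. With the counting wrappers gone the rule is unchanged: every exit path must release what it allocated. A runnable analogy (illustration only):

#include <stdlib.h>

/* Release the resource on the early return as well as on the normal path. */
static int translate_one(int bad_encoding)
{
    char *addr = malloc(16);       /* stands in for tcg_temp_new_i32() */
    if (bad_encoding) {
        free(addr);                /* mirrors the tcg_temp_free_i32() before "return 1" */
        return 1;
    }
    /* ... use addr ... */
    free(addr);
    return 0;
}

int main(void)
{
    return translate_one(0);
}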
             tmp = iwmmxt_load_creg(wrd);
             tmp2 = load_reg(s, rd);
             tcg_gen_andc_i32(tmp, tmp, tmp2);
-            dead_tmp(tmp2);
+            tcg_temp_free_i32(tmp2);
             iwmmxt_store_creg(wrd, tmp);
             break;
         case ARM_IWMMXT_wCGR0:
......
         tcg_gen_andi_i32(tmp, tmp, 7);
         iwmmxt_load_reg(cpu_V1, rd1);
         gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
-        dead_tmp(tmp);
+        tcg_temp_free_i32(tmp);
         gen_op_iwmmxt_movq_wRn_M0(wrd);
         gen_op_iwmmxt_set_mup();
         break;
......
         gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
         tcg_temp_free(tmp3);
         tcg_temp_free(tmp2);
-        dead_tmp(tmp);
+        tcg_temp_free_i32(tmp);
         gen_op_iwmmxt_movq_wRn_M0(wrd);
         gen_op_iwmmxt_set_mup();
         break;
......
         if (rd == 15 || ((insn >> 22) & 3) == 3)
             return 1;
         gen_op_iwmmxt_movq_M0_wRn(wrd);
-        tmp = new_tmp();
+        tmp = tcg_temp_new_i32();
         switch ((insn >> 22) & 3) {
         case 0:
             tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
......
         }
         tcg_gen_shli_i32(tmp, tmp, 28);
         gen_set_nzcv(tmp);
-        dead_tmp(tmp);
+        tcg_temp_free_i32(tmp);
         break;
     case 0x401: case 0x405: case 0x409: case 0x40d:	/* TBCST */
         if (((insn >> 6) & 3) == 3)
......
             gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
             break;
         }
-        dead_tmp(tmp);
+        tcg_temp_free_i32(tmp);
         gen_op_iwmmxt_movq_wRn_M0(wrd);
         gen_op_iwmmxt_set_mup();
         break;
1782 1766
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1783 1767
            return 1;
1784 1768
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1785
        tmp2 = new_tmp();
1769
        tmp2 = tcg_temp_new_i32();
1786 1770
        tcg_gen_mov_i32(tmp2, tmp);
1787 1771
        switch ((insn >> 22) & 3) {
1788 1772
        case 0:
......
1803 1787
            break;
1804 1788
        }
1805 1789
        gen_set_nzcv(tmp);
1806
        dead_tmp(tmp2);
1807
        dead_tmp(tmp);
1790
        tcg_temp_free_i32(tmp2);
1791
        tcg_temp_free_i32(tmp);
1808 1792
        break;
1809 1793
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c:	/* WACC */
1810 1794
        wrd = (insn >> 12) & 0xf;
......
         if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
             return 1;
         tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
-        tmp2 = new_tmp();
+        tmp2 = tcg_temp_new_i32();
         tcg_gen_mov_i32(tmp2, tmp);
         switch ((insn >> 22) & 3) {
         case 0:
......
             break;
         }
         gen_set_nzcv(tmp);
-        dead_tmp(tmp2);
-        dead_tmp(tmp);
+        tcg_temp_free_i32(tmp2);
+        tcg_temp_free_i32(tmp);
         break;
     case 0x103: case 0x503: case 0x903: case 0xd03:	/* TMOVMSK */
         rd = (insn >> 12) & 0xf;
......
         if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
             return 1;
         gen_op_iwmmxt_movq_M0_wRn(rd0);
-        tmp = new_tmp();
+        tmp = tcg_temp_new_i32();
         switch ((insn >> 22) & 3) {
         case 0:
             gen_helper_iwmmxt_msbb(tmp, cpu_M0);
1975 1959
        wrd = (insn >> 12) & 0xf;
1976 1960
        rd0 = (insn >> 16) & 0xf;
1977 1961
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1978
        tmp = new_tmp();
1962
        tmp = tcg_temp_new_i32();
1979 1963
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
1980
            dead_tmp(tmp);
1964
            tcg_temp_free_i32(tmp);
1981 1965
            return 1;
1982 1966
        }
1983 1967
        switch ((insn >> 22) & 3) {
......
1991 1975
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
1992 1976
            break;
1993 1977
        }
1994
        dead_tmp(tmp);
1978
        tcg_temp_free_i32(tmp);
1995 1979
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1996 1980
        gen_op_iwmmxt_set_mup();
1997 1981
        gen_op_iwmmxt_set_cup();
......
2003 1987
        wrd = (insn >> 12) & 0xf;
2004 1988
        rd0 = (insn >> 16) & 0xf;
2005 1989
        gen_op_iwmmxt_movq_M0_wRn(rd0);
2006
        tmp = new_tmp();
1990
        tmp = tcg_temp_new_i32();
2007 1991
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2008
            dead_tmp(tmp);
1992
            tcg_temp_free_i32(tmp);
2009 1993
            return 1;
2010 1994
        }
2011 1995
        switch ((insn >> 22) & 3) {
......
2019 2003
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2020 2004
            break;
2021 2005
        }
2022
        dead_tmp(tmp);
2006
        tcg_temp_free_i32(tmp);
2023 2007
        gen_op_iwmmxt_movq_wRn_M0(wrd);
2024 2008
        gen_op_iwmmxt_set_mup();
2025 2009
        gen_op_iwmmxt_set_cup();
......
2031 2015
        wrd = (insn >> 12) & 0xf;
2032 2016
        rd0 = (insn >> 16) & 0xf;
2033 2017
        gen_op_iwmmxt_movq_M0_wRn(rd0);
2034
        tmp = new_tmp();
2018
        tmp = tcg_temp_new_i32();
2035 2019
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2036
            dead_tmp(tmp);
2020
            tcg_temp_free_i32(tmp);
2037 2021
            return 1;
2038 2022
        }
2039 2023
        switch ((insn >> 22) & 3) {
......
2047 2031
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2048 2032
            break;
2049 2033
        }
2050
        dead_tmp(tmp);
2034
        tcg_temp_free_i32(tmp);
2051 2035
        gen_op_iwmmxt_movq_wRn_M0(wrd);
2052 2036
        gen_op_iwmmxt_set_mup();
2053 2037
        gen_op_iwmmxt_set_cup();
......
2059 2043
        wrd = (insn >> 12) & 0xf;
2060 2044
        rd0 = (insn >> 16) & 0xf;
2061 2045
        gen_op_iwmmxt_movq_M0_wRn(rd0);
2062
        tmp = new_tmp();
2046
        tmp = tcg_temp_new_i32();
2063 2047
        switch ((insn >> 22) & 3) {
2064 2048
        case 1:
2065 2049
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2066
                dead_tmp(tmp);
2050
                tcg_temp_free_i32(tmp);
2067 2051
                return 1;
2068 2052
            }
2069 2053
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2070 2054
            break;
2071 2055
        case 2:
2072 2056
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2073
                dead_tmp(tmp);
2057
                tcg_temp_free_i32(tmp);
2074 2058
                return 1;
2075 2059
            }
2076 2060
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2077 2061
            break;
2078 2062
        case 3:
2079 2063
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2080
                dead_tmp(tmp);
2064
                tcg_temp_free_i32(tmp);
2081 2065
                return 1;
2082 2066
            }
2083 2067
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2084 2068
            break;
2085 2069
        }
2086
        dead_tmp(tmp);
2070
        tcg_temp_free_i32(tmp);
2087 2071
        gen_op_iwmmxt_movq_wRn_M0(wrd);
2088 2072
        gen_op_iwmmxt_set_mup();
2089 2073
        gen_op_iwmmxt_set_cup();
......
             gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
             break;
         default:
-            dead_tmp(tmp2);
-            dead_tmp(tmp);
+            tcg_temp_free_i32(tmp2);
+            tcg_temp_free_i32(tmp);
             return 1;
         }
-        dead_tmp(tmp2);
-        dead_tmp(tmp);
+        tcg_temp_free_i32(tmp2);
+        tcg_temp_free_i32(tmp);
         gen_op_iwmmxt_movq_wRn_M0(wrd);
         gen_op_iwmmxt_set_mup();
         break;
......
         default:
             return 1;
         }
-        dead_tmp(tmp2);
-        dead_tmp(tmp);
+        tcg_temp_free_i32(tmp2);
+        tcg_temp_free_i32(tmp);
 
         gen_op_iwmmxt_movq_wRn_M0(acc);
         return 0;
2425 2409
        if (!env->cp[cp].cp_read)
2426 2410
            return 1;
2427 2411
        gen_set_pc_im(s->pc);
2428
        tmp = new_tmp();
2412
        tmp = tcg_temp_new_i32();
2429 2413
        tmp2 = tcg_const_i32(insn);
2430 2414
        gen_helper_get_cp(tmp, cpu_env, tmp2);
2431 2415
        tcg_temp_free(tmp2);
......
2438 2422
        tmp2 = tcg_const_i32(insn);
2439 2423
        gen_helper_set_cp(cpu_env, tmp2, tmp);
2440 2424
        tcg_temp_free(tmp2);
2441
        dead_tmp(tmp);
2425
        tcg_temp_free_i32(tmp);
2442 2426
    }
2443 2427
    return 0;
2444 2428
}
......
             store_cpu_field(tmp, cp15.c13_tls3);
             break;
         default:
-            dead_tmp(tmp);
+            tcg_temp_free_i32(tmp);
             return 0;
         }
     }
......
 
     tmp2 = tcg_const_i32(insn);
     if (insn & ARM_CP_RW_BIT) {
-        tmp = new_tmp();
+        tmp = tcg_temp_new_i32();
         gen_helper_get_cp15(tmp, cpu_env, tmp2);
         /* If the destination register is r15 then sets condition codes.  */
         if (rd != 15)
             store_reg(s, rd, tmp);
         else
-            dead_tmp(tmp);
+            tcg_temp_free_i32(tmp);
     } else {
         tmp = load_reg(s, rd);
         gen_helper_set_cp15(cpu_env, tmp2, tmp);
-        dead_tmp(tmp);
+        tcg_temp_free_i32(tmp);
         /* Normally we would always end the TB here, but Linux
          * arch/arm/mach-pxa/sleep.S expects two instructions following
          * an MMU enable to execute from cache.  Imitate this behaviour.  */
2622 2606
/* Move between integer and VFP cores.  */
2623 2607
static TCGv gen_vfp_mrs(void)
2624 2608
{
2625
    TCGv tmp = new_tmp();
2609
    TCGv tmp = tcg_temp_new_i32();
2626 2610
    tcg_gen_mov_i32(tmp, cpu_F0s);
2627 2611
    return tmp;
2628 2612
}
......
2630 2614
static void gen_vfp_msr(TCGv tmp)
2631 2615
{
2632 2616
    tcg_gen_mov_i32(cpu_F0s, tmp);
2633
    dead_tmp(tmp);
2617
    tcg_temp_free_i32(tmp);
2634 2618
}
2635 2619

  
2636 2620
static void gen_neon_dup_u8(TCGv var, int shift)
2637 2621
{
2638
    TCGv tmp = new_tmp();
2622
    TCGv tmp = tcg_temp_new_i32();
2639 2623
    if (shift)
2640 2624
        tcg_gen_shri_i32(var, var, shift);
2641 2625
    tcg_gen_ext8u_i32(var, var);
......
2643 2627
    tcg_gen_or_i32(var, var, tmp);
2644 2628
    tcg_gen_shli_i32(tmp, var, 16);
2645 2629
    tcg_gen_or_i32(var, var, tmp);
2646
    dead_tmp(tmp);
2630
    tcg_temp_free_i32(tmp);
2647 2631
}
2648 2632

  
2649 2633
static void gen_neon_dup_low16(TCGv var)
2650 2634
{
2651
    TCGv tmp = new_tmp();
2635
    TCGv tmp = tcg_temp_new_i32();
2652 2636
    tcg_gen_ext16u_i32(var, var);
2653 2637
    tcg_gen_shli_i32(tmp, var, 16);
2654 2638
    tcg_gen_or_i32(var, var, tmp);
2655
    dead_tmp(tmp);
2639
    tcg_temp_free_i32(tmp);
2656 2640
}
2657 2641

  
2658 2642
static void gen_neon_dup_high16(TCGv var)
2659 2643
{
2660
    TCGv tmp = new_tmp();
2644
    TCGv tmp = tcg_temp_new_i32();
2661 2645
    tcg_gen_andi_i32(var, var, 0xffff0000);
2662 2646
    tcg_gen_shri_i32(tmp, var, 16);
2663 2647
    tcg_gen_or_i32(var, var, tmp);
2664
    dead_tmp(tmp);
2648
    tcg_temp_free_i32(tmp);
2665 2649
}
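
Plain-C models of the three NEON dup helpers above, for reference. The middle of gen_neon_dup_u8 is elided by the viewer, so its first replication step is reconstructed here on the assumption that it mirrors the visible shift/or pattern:

#include <stdint.h>
#include <stdio.h>

static uint32_t dup_u8(uint32_t x, int shift)   /* gen_neon_dup_u8 */
{
    x = (x >> shift) & 0xffu;
    x |= x << 8;        /* assumed elided step: replicate into both bytes */
    x |= x << 16;       /* then into both halfwords */
    return x;
}

static uint32_t dup_low16(uint32_t x)           /* gen_neon_dup_low16 */
{
    x &= 0xffffu;
    return x | (x << 16);
}

static uint32_t dup_high16(uint32_t x)          /* gen_neon_dup_high16 */
{
    x &= 0xffff0000u;
    return x | (x >> 16);
}

int main(void)
{
    printf("%08x %08x %08x\n",
           dup_u8(0x000000abu, 0),      /* abababab */
           dup_low16(0x1234abcdu),      /* abcdabcd */
           dup_high16(0x1234abcdu));    /* 12341234 */
    return 0;
}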
 
 /* Disassemble a VFP instruction.  Returns nonzero if an error occured
......
                             gen_neon_dup_low16(tmp);
                         }
                         for (n = 0; n <= pass * 2; n++) {
-                            tmp2 = new_tmp();
+                            tmp2 = tcg_temp_new_i32();
                             tcg_gen_mov_i32(tmp2, tmp);
                             neon_store_reg(rn, n, tmp2);
                         }
......
                         case 0:
                             tmp2 = neon_load_reg(rn, pass);
                             gen_bfi(tmp, tmp2, tmp, offset, 0xff);
-                            dead_tmp(tmp2);
+                            tcg_temp_free_i32(tmp2);
                             break;
                         case 1:
                             tmp2 = neon_load_reg(rn, pass);
                             gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
-                            dead_tmp(tmp2);
+                            tcg_temp_free_i32(tmp2);
                             break;
                         case 2:
                             break;
......
                                 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                                 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
                             } else {
-                                tmp = new_tmp();
+                                tmp = tcg_temp_new_i32();
                                 gen_helper_vfp_get_fpscr(tmp, cpu_env);
                             }
                             break;
......
                     if (rd == 15) {
                         /* Set the 4 flag bits in the CPSR.  */
                         gen_set_nzcv(tmp);
-                        dead_tmp(tmp);
+                        tcg_temp_free_i32(tmp);
                     } else {
                         store_reg(s, rd, tmp);
                     }
......
                             break;
                         case ARM_VFP_FPSCR:
                             gen_helper_vfp_set_fpscr(cpu_env, tmp);
-                            dead_tmp(tmp);
+                            tcg_temp_free_i32(tmp);
                             gen_lookup_tb(s);
                             break;
                         case ARM_VFP_FPEXC:
......
                         tmp = gen_vfp_mrs();
                         tcg_gen_ext16u_i32(tmp, tmp);
                         gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
-                        dead_tmp(tmp);
+                        tcg_temp_free_i32(tmp);
                         break;
                     case 5: /* vcvtt.f32.f16 */
                         if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
......
                         tmp = gen_vfp_mrs();
                         tcg_gen_shri_i32(tmp, tmp, 16);
                         gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
-                        dead_tmp(tmp);
+                        tcg_temp_free_i32(tmp);
                         break;
                     case 6: /* vcvtb.f16.f32 */
                         if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                           return 1;
-                        tmp = new_tmp();
+                        tmp = tcg_temp_new_i32();
                         gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                         gen_mov_F0_vreg(0, rd);
                         tmp2 = gen_vfp_mrs();
                         tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                         tcg_gen_or_i32(tmp, tmp, tmp2);
-                        dead_tmp(tmp2);
+                        tcg_temp_free_i32(tmp2);
                         gen_vfp_msr(tmp);
                         break;
                     case 7: /* vcvtt.f16.f32 */
                         if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                           return 1;
-                        tmp = new_tmp();
+                        tmp = tcg_temp_new_i32();
                         gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                         tcg_gen_shli_i32(tmp, tmp, 16);
                         gen_mov_F0_vreg(0, rd);
                         tmp2 = gen_vfp_mrs();
                         tcg_gen_ext16u_i32(tmp2, tmp2);
                         tcg_gen_or_i32(tmp, tmp, tmp2);
-                        dead_tmp(tmp2);
+                        tcg_temp_free_i32(tmp2);
                         gen_vfp_msr(tmp);
                         break;
                     case 8: /* cmp */
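
The vcvtb/vcvtt cases above convert one single-precision value to half precision and then merge it into one 16-bit lane of the destination word, preserving the other lane via the mask-and-or visible in the hunk. A plain-C model of just that lane handling (0x3c00 is half-precision 1.0; a sketch, not QEMU code):

#include <stdint.h>
#include <stdio.h>

static uint32_t insert_bottom(uint32_t dest, uint16_t h) /* vcvtb lane merge */
{
    return (dest & 0xffff0000u) | h;
}

static uint32_t insert_top(uint32_t dest, uint16_t h)    /* vcvtt lane merge */
{
    return (dest & 0x0000ffffu) | ((uint32_t)h << 16);
}

int main(void)
{
    printf("%08x %08x\n",
           insert_bottom(0x11112222u, 0x3c00),   /* 11113c00 */
           insert_top(0x11112222u, 0x3c00));     /* 3c002222 */
    return 0;
}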
......
             else
                 rd = VFP_SREG_D(insn);
             if (s->thumb && rn == 15) {
-                addr = new_tmp();
+                addr = tcg_temp_new_i32();
                 tcg_gen_movi_i32(addr, s->pc & ~2);
             } else {
                 addr = load_reg(s, rn);
......
                     gen_mov_F0_vreg(dp, rd);
                     gen_vfp_st(s, dp, addr);
                 }
-                dead_tmp(addr);
+                tcg_temp_free_i32(addr);
             } else {
                 /* load/store multiple */
                 if (dp)
......
                         tcg_gen_addi_i32(addr, addr, offset);
                     store_reg(s, rn, addr);
                 } else {
-                    dead_tmp(addr);
+                    tcg_temp_free_i32(addr);
                 }
             }
         }
......
     } else {
         gen_set_cpsr(t0, mask);
     }
-    dead_tmp(t0);
+    tcg_temp_free_i32(t0);
     gen_lookup_tb(s);
     return 0;
 }
......
 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
 {
     TCGv tmp;
-    tmp = new_tmp();
+    tmp = tcg_temp_new_i32();
     tcg_gen_movi_i32(tmp, val);
     return gen_set_psr(s, mask, spsr, tmp);
 }
......
     store_reg(s, 15, pc);
     tmp = load_cpu_field(spsr);
     gen_set_cpsr(tmp, 0xffffffff);
-    dead_tmp(tmp);
+    tcg_temp_free_i32(tmp);
     s->is_jmp = DISAS_UPDATE;
 }
 
......
 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
 {
     gen_set_cpsr(cpsr, 0xffffffff);
-    dead_tmp(cpsr);
+    tcg_temp_free_i32(cpsr);
     store_reg(s, 15, pc);
     s->is_jmp = DISAS_UPDATE;
 }
......
 {
     if (s->condexec_mask) {
         uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
-        TCGv tmp = new_tmp();
+        TCGv tmp = tcg_temp_new_i32();
         tcg_gen_movi_i32(tmp, val);
         store_cpu_field(tmp, condexec_bits);
     }
......
 
 static TCGv neon_load_scratch(int scratch)
 {
-    TCGv tmp = new_tmp();
+    TCGv tmp = tcg_temp_new_i32();
     tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
     return tmp;
 }
......
 static void neon_store_scratch(int scratch, TCGv var)
 {
     tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
-    dead_tmp(var);
+    tcg_temp_free_i32(var);
 }
 
 static inline TCGv neon_get_scalar(int size, int reg)
......
 {
     TCGv rd, tmp;
 
-    rd = new_tmp();
-    tmp = new_tmp();
+    rd = tcg_temp_new_i32();
+    tmp = tcg_temp_new_i32();
 
     tcg_gen_shli_i32(rd, t0, 8);
     tcg_gen_andi_i32(rd, rd, 0xff00ff00);
......
     tcg_gen_or_i32(t1, t1, tmp);
     tcg_gen_mov_i32(t0, rd);
 
-    dead_tmp(tmp);
-    dead_tmp(rd);
+    tcg_temp_free_i32(tmp);
+    tcg_temp_free_i32(rd);
 }
 
 static void gen_neon_trn_u16(TCGv t0, TCGv t1)
 {
     TCGv rd, tmp;
 
-    rd = new_tmp();
-    tmp = new_tmp();
+    rd = tcg_temp_new_i32();
+    tmp = tcg_temp_new_i32();
 
     tcg_gen_shli_i32(rd, t0, 16);
     tcg_gen_andi_i32(tmp, t1, 0xffff);
......
     tcg_gen_or_i32(t1, t1, tmp);
     tcg_gen_mov_i32(t0, rd);
 
-    dead_tmp(tmp);
-    dead_tmp(rd);
+    tcg_temp_free_i32(tmp);
+    tcg_temp_free_i32(rd);
 }
 
 
......
     rn = (insn >> 16) & 0xf;
     rm = insn & 0xf;
     load = (insn & (1 << 21)) != 0;
-    addr = new_tmp();
+    addr = tcg_temp_new_i32();
     if ((insn & (1 << 23)) == 0) {
         /* Load store all elements.  */
         op = (insn >> 8) & 0xf;
......
                             tcg_gen_addi_i32(addr, addr, stride);
                             tcg_gen_shli_i32(tmp2, tmp2, 16);
                             tcg_gen_or_i32(tmp, tmp, tmp2);
-                            dead_tmp(tmp2);
+                            tcg_temp_free_i32(tmp2);
                             neon_store_reg(rd, pass, tmp);
                         } else {
                             tmp = neon_load_reg(rd, pass);
-                            tmp2 = new_tmp();
+                            tmp2 = tcg_temp_new_i32();
                             tcg_gen_shri_i32(tmp2, tmp, 16);
                             gen_st16(tmp, addr, IS_USER(s));
                             tcg_gen_addi_i32(addr, addr, stride);
......
                                 } else {
                                     tcg_gen_shli_i32(tmp, tmp, n * 8);
                                     tcg_gen_or_i32(tmp2, tmp2, tmp);
-                                    dead_tmp(tmp);
+                                    tcg_temp_free_i32(tmp);
                                 }
                             }
                             neon_store_reg(rd, pass, tmp2);
                         } else {
                             tmp2 = neon_load_reg(rd, pass);
                             for (n = 0; n < 4; n++) {
-                                tmp = new_tmp();
+                                tmp = tcg_temp_new_i32();
                                 if (n == 0) {
                                     tcg_gen_mov_i32(tmp, tmp2);
                                 } else {
......
                                 gen_st8(tmp, addr, IS_USER(s));
                                 tcg_gen_addi_i32(addr, addr, stride);
                             }
-                            dead_tmp(tmp2);
+                            tcg_temp_free_i32(tmp2);
                         }
                     }
                 }
......
                     abort();
                 }
                 tcg_gen_addi_i32(addr, addr, 1 << size);
-                tmp2 = new_tmp();
+                tmp2 = tcg_temp_new_i32();
                 tcg_gen_mov_i32(tmp2, tmp);
                 neon_store_reg(rd, 0, tmp2);
                 neon_store_reg(rd, 1, tmp);
......
                     if (size != 2) {
                         tmp2 = neon_load_reg(rd, pass);
                         gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
-                        dead_tmp(tmp2);
+                        tcg_temp_free_i32(tmp2);
                     }
                     neon_store_reg(rd, pass, tmp);
                 } else { /* Store */
......
             stride = nregs * (1 << size);
         }
     }
-    dead_tmp(addr);
+    tcg_temp_free_i32(addr);
     if (rm != 15) {
         TCGv base;
 
......
             TCGv index;
             index = load_reg(s, rm);
             tcg_gen_add_i32(base, base, index);
-            dead_tmp(index);
+            tcg_temp_free_i32(index);
         }
         store_reg(s, rn, base);
     }
......
         default: abort();
         }
     }
-    dead_tmp(src);
+    tcg_temp_free_i32(src);
 }
 
 static inline void gen_neon_addl(int size)
......
     /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
        Don't forget to clean them now.  */
     if (size < 2) {
-      dead_tmp(a);
-      dead_tmp(b);
+        tcg_temp_free_i32(a);
+        tcg_temp_free_i32(b);
     }
 }
 
......
             case 5: /* VBSL */
                 tmp3 = neon_load_reg(rd, pass);
                 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
-                dead_tmp(tmp3);
+                tcg_temp_free_i32(tmp3);
                 break;
             case 6: /* VBIT */
                 tmp3 = neon_load_reg(rd, pass);
                 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
-                dead_tmp(tmp3);
+                tcg_temp_free_i32(tmp3);
                 break;
             case 7: /* VBIF */
                 tmp3 = neon_load_reg(rd, pass);
                 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
-                dead_tmp(tmp3);
+                tcg_temp_free_i32(tmp3);
                 break;
             }
             break;
......
             break;
         case 15: /* VABA */
             GEN_NEON_INTEGER_OP(abd);
-            dead_tmp(tmp2);
+            tcg_temp_free_i32(tmp2);
             tmp2 = neon_load_reg(rd, pass);
             gen_neon_add(size, tmp, tmp2);
             break;
......
             case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
             default: return 1;
             }
-            dead_tmp(tmp2);
+            tcg_temp_free_i32(tmp2);
             tmp2 = neon_load_reg(rd, pass);
             if (u) { /* VMLS */
                 gen_neon_rsb(size, tmp, tmp2);
......
         case 27: /* Float multiply.  */
             gen_helper_neon_mul_f32(tmp, tmp, tmp2);
             if (!u) {
-                dead_tmp(tmp2);
+                tcg_temp_free_i32(tmp2);
                 tmp2 = neon_load_reg(rd, pass);
                 if (size == 0) {
                     gen_helper_neon_add_f32(tmp, tmp, tmp2);
......
         default:
             abort();
         }
-        dead_tmp(tmp2);
+        tcg_temp_free_i32(tmp2);
 
         /* Save the result.  For elementwise operations we can put it
            straight into the destination register.  For pairwise operations
......
                     } else { /* size < 3 */
                         /* Operands in T0 and T1.  */
                         tmp = neon_load_reg(rm, pass);
-                        tmp2 = new_tmp();
+                        tmp2 = tcg_temp_new_i32();
                         tcg_gen_movi_i32(tmp2, imm);
                         switch (op) {
                         case 0:  /* VSHR */
......
                             GEN_NEON_INTEGER_OP_ENV(qshl);
                             break;
                         }
-                        dead_tmp(tmp2);
+                        tcg_temp_free_i32(tmp2);
 
                         if (op == 1 || op == 3) {
                             /* Accumulate.  */
                             tmp2 = neon_load_reg(rd, pass);
                             gen_neon_add(size, tmp, tmp2);
-                            dead_tmp(tmp2);
+                            tcg_temp_free_i32(tmp2);
                         } else if (op == 4 || (op == 5 && u)) {
                             /* Insert */
                             switch (size) {
......
                             tcg_gen_andi_i32(tmp, tmp, mask);
                             tcg_gen_andi_i32(tmp2, tmp2, ~mask);
                             tcg_gen_or_i32(tmp, tmp, tmp2);
-                            dead_tmp(tmp2);
+                            tcg_temp_free_i32(tmp2);
                         }
                         neon_store_reg(rd, pass, tmp);
                     }
......
                                 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
                             }
                         }
-                        tmp = new_tmp();
+                        tmp = tcg_temp_new_i32();
                         gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
                         neon_store_reg(rd, pass, tmp);
                     } /* for pass */
......
                         gen_neon_shift_narrow(size, tmp3, tmp2, q,
                                               input_unsigned);
                         tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
-                        dead_tmp(tmp);
-                        dead_tmp(tmp3);
-                        tmp = new_tmp();
+                        tcg_temp_free_i32(tmp);
+                        tcg_temp_free_i32(tmp3);
+                        tmp = tcg_temp_new_i32();
                         gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
                         neon_store_reg(rd, pass, tmp);
                     } /* for pass */
......
                     }
                 } else {
                     /* VMOV, VMVN.  */
-                    tmp = new_tmp();
+                    tmp = tcg_temp_new_i32();
                     if (op == 14 && invert) {
                         uint32_t val;
                         val = 0;
......
                             break;
                         default: abort();
                         }
-                        dead_tmp(tmp2);
-                        dead_tmp(tmp);
+                        tcg_temp_free_i32(tmp2);
+                        tcg_temp_free_i32(tmp);
                         break;
                     case 8: case 9: case 10: case 11: case 12: case 13:
                         /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
......
                         break;
                     case 14: /* Polynomial VMULL */
                         gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
-                        dead_tmp(tmp2);
-                        dead_tmp(tmp);
+                        tcg_temp_free_i32(tmp2);
+                        tcg_temp_free_i32(tmp);
                         break;
                     default: /* 15 is RESERVED.  */
                         return 1;
......
                         neon_store_reg64(cpu_V0, rd + pass);
                     } else if (op == 4 || op == 6) {
                         /* Narrowing operation.  */
-                        tmp = new_tmp();
+                        tmp = tcg_temp_new_i32();
                         if (!u) {
                             switch (size) {
                             case 0:
......
                             default: return 1;
                             }
                         }
-                        dead_tmp(tmp2);
+                        tcg_temp_free_i32(tmp2);
                         if (op < 8) {
                             /* Accumulate.  */
                             tmp2 = neon_load_reg(rd, pass);
......
                             default:
                                 abort();
                             }
-                            dead_tmp(tmp2);
+                            tcg_temp_free_i32(tmp2);
                         }
                         neon_store_reg(rd, pass, tmp);
                     }
......
                     tmp2 = neon_get_scalar(size, rm);
                     /* We need a copy of tmp2 because gen_neon_mull
                      * deletes it during pass 0.  */
-                    tmp4 = new_tmp();
+                    tmp4 = tcg_temp_new_i32();
                     tcg_gen_mov_i32(tmp4, tmp2);
                     tmp3 = neon_load_reg(rn, 1);
 
......
                     TCGV_UNUSED(tmp2);
                     for (pass = 0; pass < 2; pass++) {
                         neon_load_reg64(cpu_V0, rm + pass);
-                        tmp = new_tmp();
+                        tmp = tcg_temp_new_i32();
                         gen_neon_narrow_op(op == 36, q, size, tmp, cpu_V0);
                         if (pass == 0) {
                             tmp2 = tmp;
......
                 case 44: /* VCVT.F16.F32 */
                     if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                       return 1;
-                    tmp = new_tmp();
-                    tmp2 = new_tmp();
+                    tmp = tcg_temp_new_i32();
+                    tmp2 = tcg_temp_new_i32();
                     tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
                     gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                     tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
......
                     gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                     tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
                     neon_store_reg(rd, 0, tmp2);
-                    tmp2 = new_tmp();
+                    tmp2 = tcg_temp_new_i32();
                     gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
                     tcg_gen_shli_i32(tmp2, tmp2, 16);
                     tcg_gen_or_i32(tmp2, tmp2, tmp);
                     neon_store_reg(rd, 1, tmp2);
-                    dead_tmp(tmp);
+                    tcg_temp_free_i32(tmp);
                     break;
                 case 46: /* VCVT.F32.F16 */
                     if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                       return 1;
-                    tmp3 = new_tmp();
+                    tmp3 = tcg_temp_new_i32();
                     tmp = neon_load_reg(rm, 0);
                     tmp2 = neon_load_reg(rm, 1);
                     tcg_gen_ext16u_i32(tmp3, tmp);
......
                     tcg_gen_shri_i32(tmp3, tmp, 16);
                     gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                     tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
-                    dead_tmp(tmp);
+                    tcg_temp_free_i32(tmp);
                     tcg_gen_ext16u_i32(tmp3, tmp2);
                     gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                     tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
                     tcg_gen_shri_i32(tmp3, tmp2, 16);
                     gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                     tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
-                    dead_tmp(tmp2);
-                    dead_tmp(tmp3);
+                    tcg_temp_free_i32(tmp2);
+                    tcg_temp_free_i32(tmp3);
                     break;
                 default:
                 elementwise:
......
                 if (insn & (1 << 6)) {
                     tmp = neon_load_reg(rd, 0);
                 } else {
-                    tmp = new_tmp();
+                    tmp = tcg_temp_new_i32();
                     tcg_gen_movi_i32(tmp, 0);
                 }
                 tmp2 = neon_load_reg(rm, 0);
                 tmp4 = tcg_const_i32(rn);
                 tmp5 = tcg_const_i32(n);
                 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
-                dead_tmp(tmp);
+                tcg_temp_free_i32(tmp);
                 if (insn & (1 << 6)) {
                     tmp = neon_load_reg(rd, 1);
                 } else {
-                    tmp = new_tmp();
+                    tmp = tcg_temp_new_i32();
                     tcg_gen_movi_i32(tmp, 0);
                 }
                 tmp3 = neon_load_reg(rm, 1);
......
                 tcg_temp_free_i32(tmp4);
                 neon_store_reg(rd, 0, tmp2);
                 neon_store_reg(rd, 1, tmp3);
-                dead_tmp(tmp);
+                tcg_temp_free_i32(tmp);
             } else if ((insn & 0x380) == 0) {
                 /* VDUP */
                 if (insn & (1 << 19)) {
......
                         gen_neon_dup_low16(tmp);
                 }
                 for (pass = 0; pass < (q ? 4 : 2); pass++) {
-                    tmp2 = new_tmp();
+                    tmp2 = tcg_temp_new_i32();
                     tcg_gen_mov_i32(tmp2, tmp);
                     neon_store_reg(rd, pass, tmp2);
                 }
-                dead_tmp(tmp);
+                tcg_temp_free_i32(tmp);
             } else {
                 return 1;
             }
......
This diff was truncated because it exceeds the maximum size that can be displayed.
