Revision b26eefb6

b/target-arm/cpu.h
89 89
    uint32_t NZF; /* N is bit 31. Z is computed from NZF */
90 90
    uint32_t QF; /* 0 or 1 */
91 91
    uint32_t GE; /* cpsr[19:16] */
92
    int thumb; /* cprs[5]. 0 = arm mode, 1 = thumb mode. */
92
    uint32_t thumb; /* cpsr[5]. 0 = arm mode, 1 = thumb mode. */
93 93
    uint32_t condexec_bits; /* IT bits.  cpsr[15:10,26:25].  */
94 94

  
95 95
    /* System control coprocessor (cp15) */
......
207 207
} CPUARMState;
208 208

  
209 209
CPUARMState *cpu_arm_init(const char *cpu_model);
210
void arm_translate_init(void);
210 211
int cpu_arm_exec(CPUARMState *s);
211 212
void cpu_arm_close(CPUARMState *s);
212 213
void do_interrupt(CPUARMState *);
b/target-arm/helper.c
5 5
#include "cpu.h"
6 6
#include "exec-all.h"
7 7
#include "gdbstub.h"
8
#include "helpers.h"
8 9

  
9 10
static uint32_t cortexa8_cp15_c0_c1[8] =
10 11
{ 0x1031, 0x11, 0x400, 0, 0x31100003, 0x20000000, 0x01202000, 0x11 };
......
174 175
{
175 176
    CPUARMState *env;
176 177
    uint32_t id;
178
    static int inited = 0;
177 179

  
178 180
    id = cpu_arm_find_by_name(cpu_model);
179 181
    if (id == 0)
......
182 184
    if (!env)
183 185
        return NULL;
184 186
    cpu_exec_init(env);
187
    if (!inited) {
188
        inited = 1;
189
        arm_translate_init();
190
    }
191

  
185 192
    env->cpu_model_str = cpu_model;
186 193
    env->cp15.c0_cpuid = id;
187 194
    cpu_reset(env);
......
315 322
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
316 323
}
317 324

  
325
#define HELPER(x) helper_##x
326
/* Sign/zero extend */
327
uint32_t HELPER(sxtb16)(uint32_t x)
328
{
329
    uint32_t res;
330
    res = (uint16_t)(int8_t)x;
331
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
332
    return res;
333
}
334

  
335
uint32_t HELPER(uxtb16)(uint32_t x)
336
{
337
    uint32_t res;
338
    res = (uint16_t)(uint8_t)x;
339
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
340
    return res;
341
}
342

  
318 343
#if defined(CONFIG_USER_ONLY)
319 344

  
320 345
void do_interrupt (CPUState *env)
......
1861 1886
}
1862 1887

  
1863 1888
#endif
1889

  
b/target-arm/helpers.h
1
#ifndef DEF_HELPER
2
#define DEF_HELPER(name, ret, args) ret helper_##name args;
3
#endif
4

  
5
DEF_HELPER(sxtb16, uint32_t, (uint32_t))
6
DEF_HELPER(uxtb16, uint32_t, (uint32_t))
b/target-arm/op.c
20 20
 */
21 21
#include "exec.h"
22 22

  
23
#define REGNAME r0
24
#define REG (env->regs[0])
25
#include "op_template.h"
26

  
27
#define REGNAME r1
28
#define REG (env->regs[1])
29
#include "op_template.h"
30

  
31
#define REGNAME r2
32
#define REG (env->regs[2])
33
#include "op_template.h"
34

  
35
#define REGNAME r3
36
#define REG (env->regs[3])
37
#include "op_template.h"
38

  
39
#define REGNAME r4
40
#define REG (env->regs[4])
41
#include "op_template.h"
42

  
43
#define REGNAME r5
44
#define REG (env->regs[5])
45
#include "op_template.h"
46

  
47
#define REGNAME r6
48
#define REG (env->regs[6])
49
#include "op_template.h"
50

  
51
#define REGNAME r7
52
#define REG (env->regs[7])
53
#include "op_template.h"
54

  
55
#define REGNAME r8
56
#define REG (env->regs[8])
57
#include "op_template.h"
58

  
59
#define REGNAME r9
60
#define REG (env->regs[9])
61
#include "op_template.h"
62

  
63
#define REGNAME r10
64
#define REG (env->regs[10])
65
#include "op_template.h"
66

  
67
#define REGNAME r11
68
#define REG (env->regs[11])
69
#include "op_template.h"
70

  
71
#define REGNAME r12
72
#define REG (env->regs[12])
73
#include "op_template.h"
74

  
75
#define REGNAME r13
76
#define REG (env->regs[13])
77
#include "op_template.h"
78

  
79
#define REGNAME r14
80
#define REG (env->regs[14])
81
#include "op_template.h"
82

  
83
#define REGNAME r15
84
#define REG (env->regs[15])
85
#define SET_REG(x) REG = x & ~(uint32_t)1
86
#include "op_template.h"
87

  
88
void OPPROTO op_bx_T0(void)
89
{
90
  env->regs[15] = T0 & ~(uint32_t)1;
91
  env->thumb = (T0 & 1) != 0;
92
}
93

  
94
void OPPROTO op_movl_T0_0(void)
95
{
96
    T0 = 0;
97
}
98

  
99
void OPPROTO op_movl_T0_im(void)
100
{
101
    T0 = PARAM1;
102
}
103

  
104
void OPPROTO op_movl_T1_im(void)
105
{
106
    T1 = PARAM1;
107
}
108

  
109
void OPPROTO op_mov_CF_T1(void)
110
{
111
    env->CF = ((uint32_t)T1) >> 31;
112
}
113

  
114
void OPPROTO op_movl_T2_im(void)
115
{
116
    T2 = PARAM1;
117
}
118

  
119
void OPPROTO op_addl_T1_im(void)
120
{
121
    T1 += PARAM1;
122
}
123

  
124
void OPPROTO op_addl_T1_T2(void)
125
{
126
    T1 += T2;
127
}
128

  
129
void OPPROTO op_subl_T1_T2(void)
130
{
131
    T1 -= T2;
132
}
133

  
134
void OPPROTO op_addl_T0_T1(void)
135
{
136
    T0 += T1;
137
}
138

  
139 23
void OPPROTO op_addl_T0_T1_cc(void)
140 24
{
141 25
    unsigned int src1;
......
146 30
    env->VF = (src1 ^ T1 ^ -1) & (src1 ^ T0);
147 31
}
148 32

  
149
void OPPROTO op_adcl_T0_T1(void)
150
{
151
    T0 += T1 + env->CF;
152
}
153

  
154 33
void OPPROTO op_adcl_T0_T1_cc(void)
155 34
{
156 35
    unsigned int src1;
......
169 48

  
170 49
#define OPSUB(sub, sbc, res, T0, T1)            \
171 50
                                                \
172
void OPPROTO op_ ## sub ## l_T0_T1(void)        \
173
{                                               \
174
    res = T0 - T1;                              \
175
}                                               \
176
                                                \
177 51
void OPPROTO op_ ## sub ## l_T0_T1_cc(void)     \
178 52
{                                               \
179 53
    unsigned int src1;                          \
......
211 85

  
212 86
OPSUB(rsb, rsc, T0, T1, T0)
213 87

  
214
void OPPROTO op_andl_T0_T1(void)
215
{
216
    T0 &= T1;
217
}
218

  
219
void OPPROTO op_xorl_T0_T1(void)
220
{
221
    T0 ^= T1;
222
}
223

  
224
void OPPROTO op_orl_T0_T1(void)
225
{
226
    T0 |= T1;
227
}
228

  
229
void OPPROTO op_bicl_T0_T1(void)
230
{
231
    T0 &= ~T1;
232
}
233

  
234
void OPPROTO op_notl_T0(void)
235
{
236
    T0 = ~T0;
237
}
238

  
239
void OPPROTO op_notl_T1(void)
240
{
241
    T1 = ~T1;
242
}
243

  
244
void OPPROTO op_logic_T0_cc(void)
245
{
246
    env->NZF = T0;
247
}
248

  
249
void OPPROTO op_logic_T1_cc(void)
250
{
251
    env->NZF = T1;
252
}
253

  
254 88
#define EIP (env->regs[15])
255 89

  
256 90
void OPPROTO op_test_eq(void)
......
485 319

  
486 320
/* shifts */
487 321

  
488
/* Used by NEON.  */
489
void OPPROTO op_shll_T0_im(void)
490
{
491
    T1 = T1 << PARAM1;
492
}
493

  
494
/* T1 based */
495

  
496
void OPPROTO op_shll_T1_im(void)
497
{
498
    T1 = T1 << PARAM1;
499
}
500

  
501
void OPPROTO op_shrl_T1_im(void)
502
{
503
    T1 = (uint32_t)T1 >> PARAM1;
504
}
505

  
506
void OPPROTO op_shrl_T1_0(void)
507
{
508
    T1 = 0;
509
}
510

  
511
void OPPROTO op_sarl_T1_im(void)
512
{
513
    T1 = (int32_t)T1 >> PARAM1;
514
}
515

  
516
void OPPROTO op_sarl_T1_0(void)
517
{
518
    T1 = (int32_t)T1 >> 31;
519
}
520

  
521
void OPPROTO op_rorl_T1_im(void)
522
{
523
    int shift;
524
    shift = PARAM1;
525
    T1 = ((uint32_t)T1 >> shift) | (T1 << (32 - shift));
526
}
527

  
528
void OPPROTO op_rrxl_T1(void)
529
{
530
    T1 = ((uint32_t)T1 >> 1) | ((uint32_t)env->CF << 31);
531
}
532

  
533 322
/* T1 based, set C flag */
534 323
void OPPROTO op_shll_T1_im_cc(void)
535 324
{
......
577 366
    env->CF = c;
578 367
}
579 368

  
580
/* T2 based */
581
void OPPROTO op_shll_T2_im(void)
582
{
583
    T2 = T2 << PARAM1;
584
}
585

  
586
void OPPROTO op_shrl_T2_im(void)
587
{
588
    T2 = (uint32_t)T2 >> PARAM1;
589
}
590

  
591
void OPPROTO op_shrl_T2_0(void)
592
{
593
    T2 = 0;
594
}
595

  
596
void OPPROTO op_sarl_T2_im(void)
597
{
598
    T2 = (int32_t)T2 >> PARAM1;
599
}
600

  
601
void OPPROTO op_sarl_T2_0(void)
602
{
603
    T2 = (int32_t)T2 >> 31;
604
}
605

  
606
void OPPROTO op_rorl_T2_im(void)
607
{
608
    int shift;
609
    shift = PARAM1;
610
    T2 = ((uint32_t)T2 >> shift) | (T2 << (32 - shift));
611
}
612

  
613
void OPPROTO op_rrxl_T2(void)
614
{
615
    T2 = ((uint32_t)T2 >> 1) | ((uint32_t)env->CF << 31);
616
}
617

  
618 369
/* T1 based, use T0 as shift count */
619 370

  
620 371
void OPPROTO op_shll_T1_T0(void)
......
733 484
    FORCE_RET();
734 485
}
735 486

  
736
void OPPROTO op_sarl_T0_im(void)
737
{
738
    T0 = (int32_t)T0 >> PARAM1;
739
}
740

  
741
/* Sign/zero extend */
742
void OPPROTO op_sxth_T0(void)
743
{
744
  T0 = (int16_t)T0;
745
}
746

  
747
void OPPROTO op_sxth_T1(void)
748
{
749
  T1 = (int16_t)T1;
750
}
751

  
752
void OPPROTO op_sxtb_T1(void)
753
{
754
    T1 = (int8_t)T1;
755
}
756

  
757
void OPPROTO op_uxtb_T1(void)
758
{
759
    T1 = (uint8_t)T1;
760
}
761

  
762
void OPPROTO op_uxth_T1(void)
763
{
764
    T1 = (uint16_t)T1;
765
}
766

  
767
void OPPROTO op_sxtb16_T1(void)
768
{
769
    uint32_t res;
770
    res = (uint16_t)(int8_t)T1;
771
    res |= (uint32_t)(int8_t)(T1 >> 16) << 16;
772
    T1 = res;
773
}
774

  
775
void OPPROTO op_uxtb16_T1(void)
776
{
777
    uint32_t res;
778
    res = (uint16_t)(uint8_t)T1;
779
    res |= (uint32_t)(uint8_t)(T1 >> 16) << 16;
780
    T1 = res;
781
}
782

  
783 487
#define SIGNBIT (uint32_t)0x80000000
784 488
/* saturating arithmetic  */
785 489
void OPPROTO op_addl_T0_T1_setq(void)
......
1369 1073
    FORCE_RET();
1370 1074
}
1371 1075

  
1372
void OPPROTO op_movl_T0_T1(void)
1373
{
1374
    T0 = T1;
1375
}
1376

  
1377
void OPPROTO op_movl_T0_T2(void)
1378
{
1379
    T0 = T2;
1380
}
1381

  
1382
void OPPROTO op_movl_T1_T0(void)
1383
{
1384
    T1 = T0;
1385
}
1386

  
1387
void OPPROTO op_movl_T1_T2(void)
1388
{
1389
    T1 = T2;
1390
}
1391

  
1392
void OPPROTO op_movl_T2_T0(void)
1393
{
1394
    T2 = T0;
1395
}
1396

  
1397 1076
/* ARMv6 Media instructions.  */
1398 1077

  
1399 1078
/* Note that signed overflow is undefined in C.  The following routines are
......
1769 1448
}
1770 1449

  
1771 1450
/* Dual 16-bit add.  */
1772
void OPPROTO op_add16_T1_T2(void)
1773
{
1774
    uint32_t mask;
1775
    mask = (T0 & T1) & 0x8000;
1776
    T0 ^= ~0x8000;
1777
    T1 ^= ~0x8000;
1778
    T0 = (T0 + T1) ^ mask;
1779
}
1780

  
1781 1451
static inline uint8_t do_usad(uint8_t a, uint8_t b)
1782 1452
{
1783 1453
    if (a > b)
/dev/null
1
/*
2
 *  ARM micro operations (templates for various register related
3
 *  operations)
4
 *
5
 *  Copyright (c) 2003 Fabrice Bellard
6
 *
7
 * This library is free software; you can redistribute it and/or
8
 * modify it under the terms of the GNU Lesser General Public
9
 * License as published by the Free Software Foundation; either
10
 * version 2 of the License, or (at your option) any later version.
11
 *
12
 * This library is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
 * Lesser General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU Lesser General Public
18
 * License along with this library; if not, write to the Free Software
19
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20
 */
21

  
22
#ifndef SET_REG
23
#define SET_REG(x) REG = x
24
#endif
25

  
26
void OPPROTO glue(op_movl_T0_, REGNAME)(void)
27
{
28
    T0 = REG;
29
}
30

  
31
void OPPROTO glue(op_movl_T1_, REGNAME)(void)
32
{
33
    T1 = REG;
34
}
35

  
36
void OPPROTO glue(op_movl_T2_, REGNAME)(void)
37
{
38
    T2 = REG;
39
}
40

  
41
void OPPROTO glue(glue(op_movl_, REGNAME), _T0)(void)
42
{
43
    SET_REG (T0);
44
}
45

  
46
void OPPROTO glue(glue(op_movl_, REGNAME), _T1)(void)
47
{
48
    SET_REG (T1);
49
}
50

  
51
#undef REG
52
#undef REGNAME
53
#undef SET_REG
b/target-arm/translate.c
29 29
#include "exec-all.h"
30 30
#include "disas.h"
31 31
#include "tcg-op.h"
32
#include "helpers.h"
32 33

  
33 34
#define ENABLE_ARCH_5J    0
34 35
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
......
73 74
extern FILE *logfile;
74 75
extern int loglevel;
75 76

  
77
static TCGv cpu_env;
78
/* FIXME:  These should be removed.  */
79
static TCGv cpu_T[3];
80

  
81
/* initialize TCG globals.  */
82
void arm_translate_init(void)
83
{
84
    cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");
85

  
86
    cpu_T[0] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG1, "T0");
87
    cpu_T[1] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG2, "T1");
88
    cpu_T[2] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG3, "T2");
89
}
90

  
91
/* The code generator doesn't like lots of temporaries, so maintain our own
92
   cache for reuse within a function.  */
93
#define MAX_TEMPS 8
94
static int num_temps;
95
static TCGv temps[MAX_TEMPS];
96

  
97
/* Allocate a temporary variable.  */
98
static TCGv new_tmp(void)
99
{
100
    TCGv tmp;
101
    if (num_temps == MAX_TEMPS)
102
        abort();
103

  
104
    if (GET_TCGV(temps[num_temps]))
105
      return temps[num_temps++];
106

  
107
    tmp = tcg_temp_new(TCG_TYPE_I32);
108
    temps[num_temps++] = tmp;
109
    return tmp;
110
}
111

  
112
/* Release a temporary variable.  */
113
static void dead_tmp(TCGv tmp)
114
{
115
    int i;
116
    num_temps--;
117
    i = num_temps;
118
    if (GET_TCGV(temps[i]) == GET_TCGV(tmp))
119
        return;
120

  
121
    /* Shuffle this temp to the last slot.  */
122
    while (GET_TCGV(temps[i]) != GET_TCGV(tmp))
123
        i--;
124
    while (i < num_temps) {
125
        temps[i] = temps[i + 1];
126
        i++;
127
    }
128
    temps[i] = tmp;
129
}
130

  
131
/* Set a variable to the value of a CPU register.  */
132
static void load_reg_var(DisasContext *s, TCGv var, int reg)
133
{
134
    if (reg == 15) {
135
        uint32_t addr;
136
        /* normally, since we updated PC, we need only to add one insn */
137
        if (s->thumb)
138
            addr = (long)s->pc + 2;
139
        else
140
            addr = (long)s->pc + 4;
141
        tcg_gen_movi_i32(var, addr);
142
    } else {
143
        tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
144
    }
145
}
146

  
147
/* Create a new temporary and set it to the value of a CPU register.  */
148
static inline TCGv load_reg(DisasContext *s, int reg)
149
{
150
    TCGv tmp = new_tmp();
151
    load_reg_var(s, tmp, reg);
152
    return tmp;
153
}
154

  
155
/* Set a CPU register.  The source must be a temporary and will be
156
   marked as dead.  */
157
static void store_reg(DisasContext *s, int reg, TCGv var)
158
{
159
    if (reg == 15) {
160
        tcg_gen_andi_i32(var, var, ~1);
161
        s->is_jmp = DISAS_JUMP;
162
    }
163
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
164
    dead_tmp(var);
165
}
166

  
167

  
168
/* Basic operations.  */
169
#define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
170
#define gen_op_movl_T0_T2() tcg_gen_mov_i32(cpu_T[0], cpu_T[2])
171
#define gen_op_movl_T1_T0() tcg_gen_mov_i32(cpu_T[1], cpu_T[0])
172
#define gen_op_movl_T1_T2() tcg_gen_mov_i32(cpu_T[1], cpu_T[2])
173
#define gen_op_movl_T2_T0() tcg_gen_mov_i32(cpu_T[2], cpu_T[0])
174
#define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
175
#define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)
176
#define gen_op_movl_T2_im(im) tcg_gen_movi_i32(cpu_T[2], im)
177

  
178
#define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
179
#define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
180
#define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
181
#define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])
182

  
183
#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
184
#define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
185
#define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
186
#define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
187
#define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
188
#define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
189
#define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);
190

  
191
#define gen_op_shll_T0_im(im) tcg_gen_shli_i32(cpu_T[0], cpu_T[0], im)
192
#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
193
#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)
194
#define gen_op_sarl_T1_im(im) tcg_gen_sari_i32(cpu_T[1], cpu_T[1], im)
195
#define gen_op_rorl_T1_im(im) tcg_gen_rori_i32(cpu_T[1], cpu_T[1], im)
196

  
197
/* Value extensions.  */
198
#define gen_uxtb(var) tcg_gen_andi_i32(var, var, 0xff)
199
#define gen_uxth(var) tcg_gen_andi_i32(var, var, 0xffff)
200
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
201
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
202

  
203
#define HELPER_ADDR(x) helper_##x
204

  
205
#define gen_sxtb16(var) tcg_gen_helper_1_1(HELPER_ADDR(sxtb16), var, var)
206
#define gen_uxtb16(var) tcg_gen_helper_1_1(HELPER_ADDR(uxtb16), var, var)
207

  
208
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
209
    tmp = (t0 ^ t1) & 0x8000;
210
    t0 &= ~0x8000;
211
    t1 &= ~0x8000;
212
    t0 = (t0 + t1) ^ tmp;
213
 */
214

  
215
static void gen_add16(TCGv t0, TCGv t1)
216
{
217
    TCGv tmp = new_tmp();
218
    tcg_gen_xor_i32(tmp, t0, t1);
219
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
220
    tcg_gen_andi_i32(t0, t0, ~0x8000);
221
    tcg_gen_andi_i32(t1, t1, ~0x8000);
222
    tcg_gen_add_i32(t0, t0, t1);
223
    tcg_gen_xor_i32(t0, t0, tmp);
224
    dead_tmp(tmp);
225
    dead_tmp(t1);
226
}
227

  
228
/* Set CF to the top bit of var.  */
229
static void gen_set_CF_bit31(TCGv var)
230
{
231
    TCGv tmp = new_tmp();
232
    tcg_gen_shri_i32(tmp, var, 31);
233
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, CF));
234
    dead_tmp(tmp);
235
}
236

  
237
/* Set N and Z flags from var.  */
238
static inline void gen_logic_CC(TCGv var)
239
{
240
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NZF));
241
}
242

  
243
/* T0 += T1 + CF.  */
244
static void gen_adc_T0_T1(void)
245
{
246
    TCGv tmp = new_tmp();
247
    gen_op_addl_T0_T1();
248
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUState, CF));
249
    tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);
250
    dead_tmp(tmp);
251
}
252

  
253
/* FIXME:  Implement this natively.  */
254
static inline void tcg_gen_not_i32(TCGv t0, TCGv t1)
255
{
256
    tcg_gen_xori_i32(t0, t1, ~0);
257
}
258

  
259
/* T0 &= ~T1.  Clobbers T1.  */
260
/* FIXME: Implement bic natively.  */
261
static inline void gen_op_bicl_T0_T1(void)
262
{
263
    gen_op_notl_T1();
264
    gen_op_andl_T0_T1();
265
}
266

  
267
/* FIXME:  Implement this natively.  */
268
static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
269
{
270
    TCGv tmp;
271

  
272
    if (i == 0)
273
        return;
274

  
275
    tmp = new_tmp();
276
    tcg_gen_shri_i32(tmp, t1, i);
277
    tcg_gen_shli_i32(t1, t1, 32 - i);
278
    tcg_gen_or_i32(t0, t1, tmp);
279
    dead_tmp(tmp);
280
}
281

  
282
/* Shift by immediate.  Includes special handling for shift == 0.  */
283
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift)
284
{
285
    if (shift != 0) {
286
        switch (shiftop) {
287
        case 0: tcg_gen_shli_i32(var, var, shift); break;
288
        case 1: tcg_gen_shri_i32(var, var, shift); break;
289
        case 2: tcg_gen_sari_i32(var, var, shift); break;
290
        case 3: tcg_gen_rori_i32(var, var, shift); break;
291
        }
292
    } else {
293
        TCGv tmp;
294

  
295
        switch (shiftop) {
296
        case 0: break;
297
        case 1: tcg_gen_movi_i32(var, 0); break;
298
        case 2: tcg_gen_sari_i32(var, var, 31); break;
299
        case 3: /* rrx */
300
            tcg_gen_shri_i32(var, var, 1);
301
            tmp = new_tmp();
302
            tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUState, CF));
303
            tcg_gen_shli_i32(tmp, tmp, 31);
304
            tcg_gen_or_i32(var, var, tmp);
305
            dead_tmp(tmp);
306
            break;
307
        }
308
    }
309
};
310

  
76 311
#define PAS_OP(pfx) {  \
77 312
    gen_op_ ## pfx ## add16_T0_T1, \
78 313
    gen_op_ ## pfx ## addsubx_T0_T1, \
......
154 389
    1, /* mvn */
155 390
};
156 391

  
157
static GenOpFunc1 *gen_shift_T1_im[4] = {
158
    gen_op_shll_T1_im,
159
    gen_op_shrl_T1_im,
160
    gen_op_sarl_T1_im,
161
    gen_op_rorl_T1_im,
162
};
163

  
164
static GenOpFunc *gen_shift_T1_0[4] = {
165
    NULL,
166
    gen_op_shrl_T1_0,
167
    gen_op_sarl_T1_0,
168
    gen_op_rrxl_T1,
169
};
170

  
171
static GenOpFunc1 *gen_shift_T2_im[4] = {
172
    gen_op_shll_T2_im,
173
    gen_op_shrl_T2_im,
174
    gen_op_sarl_T2_im,
175
    gen_op_rorl_T2_im,
176
};
177

  
178
static GenOpFunc *gen_shift_T2_0[4] = {
179
    NULL,
180
    gen_op_shrl_T2_0,
181
    gen_op_sarl_T2_0,
182
    gen_op_rrxl_T2,
183
};
184

  
185 392
static GenOpFunc1 *gen_shift_T1_im_cc[4] = {
186 393
    gen_op_shll_T1_im_cc,
187 394
    gen_op_shrl_T1_im_cc,
......
210 417
    gen_op_rorl_T1_T0_cc,
211 418
};
212 419

  
213
static GenOpFunc *gen_op_movl_TN_reg[3][16] = {
214
    {
215
        gen_op_movl_T0_r0,
216
        gen_op_movl_T0_r1,
217
        gen_op_movl_T0_r2,
218
        gen_op_movl_T0_r3,
219
        gen_op_movl_T0_r4,
220
        gen_op_movl_T0_r5,
221
        gen_op_movl_T0_r6,
222
        gen_op_movl_T0_r7,
223
        gen_op_movl_T0_r8,
224
        gen_op_movl_T0_r9,
225
        gen_op_movl_T0_r10,
226
        gen_op_movl_T0_r11,
227
        gen_op_movl_T0_r12,
228
        gen_op_movl_T0_r13,
229
        gen_op_movl_T0_r14,
230
        gen_op_movl_T0_r15,
231
    },
232
    {
233
        gen_op_movl_T1_r0,
234
        gen_op_movl_T1_r1,
235
        gen_op_movl_T1_r2,
236
        gen_op_movl_T1_r3,
237
        gen_op_movl_T1_r4,
238
        gen_op_movl_T1_r5,
239
        gen_op_movl_T1_r6,
240
        gen_op_movl_T1_r7,
241
        gen_op_movl_T1_r8,
242
        gen_op_movl_T1_r9,
243
        gen_op_movl_T1_r10,
244
        gen_op_movl_T1_r11,
245
        gen_op_movl_T1_r12,
246
        gen_op_movl_T1_r13,
247
        gen_op_movl_T1_r14,
248
        gen_op_movl_T1_r15,
249
    },
250
    {
251
        gen_op_movl_T2_r0,
252
        gen_op_movl_T2_r1,
253
        gen_op_movl_T2_r2,
254
        gen_op_movl_T2_r3,
255
        gen_op_movl_T2_r4,
256
        gen_op_movl_T2_r5,
257
        gen_op_movl_T2_r6,
258
        gen_op_movl_T2_r7,
259
        gen_op_movl_T2_r8,
260
        gen_op_movl_T2_r9,
261
        gen_op_movl_T2_r10,
262
        gen_op_movl_T2_r11,
263
        gen_op_movl_T2_r12,
264
        gen_op_movl_T2_r13,
265
        gen_op_movl_T2_r14,
266
        gen_op_movl_T2_r15,
267
    },
268
};
269

  
270
static GenOpFunc *gen_op_movl_reg_TN[2][16] = {
271
    {
272
        gen_op_movl_r0_T0,
273
        gen_op_movl_r1_T0,
274
        gen_op_movl_r2_T0,
275
        gen_op_movl_r3_T0,
276
        gen_op_movl_r4_T0,
277
        gen_op_movl_r5_T0,
278
        gen_op_movl_r6_T0,
279
        gen_op_movl_r7_T0,
280
        gen_op_movl_r8_T0,
281
        gen_op_movl_r9_T0,
282
        gen_op_movl_r10_T0,
283
        gen_op_movl_r11_T0,
284
        gen_op_movl_r12_T0,
285
        gen_op_movl_r13_T0,
286
        gen_op_movl_r14_T0,
287
        gen_op_movl_r15_T0,
288
    },
289
    {
290
        gen_op_movl_r0_T1,
291
        gen_op_movl_r1_T1,
292
        gen_op_movl_r2_T1,
293
        gen_op_movl_r3_T1,
294
        gen_op_movl_r4_T1,
295
        gen_op_movl_r5_T1,
296
        gen_op_movl_r6_T1,
297
        gen_op_movl_r7_T1,
298
        gen_op_movl_r8_T1,
299
        gen_op_movl_r9_T1,
300
        gen_op_movl_r10_T1,
301
        gen_op_movl_r11_T1,
302
        gen_op_movl_r12_T1,
303
        gen_op_movl_r13_T1,
304
        gen_op_movl_r14_T1,
305
        gen_op_movl_r15_T1,
306
    },
307
};
308

  
309
static GenOpFunc1 *gen_op_movl_TN_im[3] = {
310
    gen_op_movl_T0_im,
311
    gen_op_movl_T1_im,
312
    gen_op_movl_T2_im,
313
};
314

  
315 420
static GenOpFunc1 *gen_shift_T0_im_thumb_cc[3] = {
316 421
    gen_op_shll_T0_im_thumb_cc,
317 422
    gen_op_shrl_T0_im_thumb_cc,
......
324 429
    gen_op_sarl_T0_im_thumb,
325 430
};
326 431

  
432
/* Set PC and thumb state from T0.  Clobbers T0.  */
327 433
static inline void gen_bx(DisasContext *s)
328 434
{
329
  s->is_jmp = DISAS_UPDATE;
330
  gen_op_bx_T0();
331
}
435
    TCGv tmp;
332 436

  
437
    s->is_jmp = DISAS_UPDATE;
438
    tmp = new_tmp();
439
    tcg_gen_andi_i32(tmp, cpu_T[0], 1);
440
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
441
    dead_tmp(tmp);
442
    tcg_gen_andi_i32(cpu_T[0], cpu_T[0], ~1);
443
    tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, regs[15]));
444
}
333 445

  
334 446
#if defined(CONFIG_USER_ONLY)
335 447
#define gen_ldst(name, s) gen_op_##name##_raw()
......
343 455
    } while (0)
344 456
#endif
345 457

  
346
static inline void gen_movl_TN_reg(DisasContext *s, int reg, int t)
347
{
348
    int val;
349

  
350
    if (reg == 15) {
351
        /* normally, since we updated PC, we need only to add one insn */
352
        if (s->thumb)
353
            val = (long)s->pc + 2;
354
        else
355
            val = (long)s->pc + 4;
356
        gen_op_movl_TN_im[t](val);
357
    } else {
358
        gen_op_movl_TN_reg[t][reg]();
359
    }
360
}
361

  
362 458
static inline void gen_movl_T0_reg(DisasContext *s, int reg)
363 459
{
364
    gen_movl_TN_reg(s, reg, 0);
460
    load_reg_var(s, cpu_T[0], reg);
365 461
}
366 462

  
367 463
static inline void gen_movl_T1_reg(DisasContext *s, int reg)
368 464
{
369
    gen_movl_TN_reg(s, reg, 1);
465
    load_reg_var(s, cpu_T[1], reg);
370 466
}
371 467

  
372 468
static inline void gen_movl_T2_reg(DisasContext *s, int reg)
373 469
{
374
    gen_movl_TN_reg(s, reg, 2);
470
    load_reg_var(s, cpu_T[2], reg);
471
}
472

  
473
static inline void gen_set_pc_T0(void)
474
{
475
    tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, regs[15]));
375 476
}
376 477

  
377 478
static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
378 479
{
379
    gen_op_movl_reg_TN[t][reg]();
480
    TCGv tmp;
481
    if (reg == 15) {
482
        tmp = new_tmp();
483
        tcg_gen_andi_i32(tmp, cpu_T[t], ~1);
484
    } else {
485
        tmp = cpu_T[t];
486
    }
487
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[reg]));
380 488
    if (reg == 15) {
489
        dead_tmp(tmp);
381 490
        s->is_jmp = DISAS_JUMP;
382 491
    }
383 492
}
......
403 512
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn)
404 513
{
405 514
    int val, rm, shift, shiftop;
515
    TCGv offset;
406 516

  
407 517
    if (!(insn & (1 << 25))) {
408 518
        /* immediate */
......
415 525
        /* shift/register */
416 526
        rm = (insn) & 0xf;
417 527
        shift = (insn >> 7) & 0x1f;
418
        gen_movl_T2_reg(s, rm);
419 528
        shiftop = (insn >> 5) & 3;
420
        if (shift != 0) {
421
            gen_shift_T2_im[shiftop](shift);
422
        } else if (shiftop != 0) {
423
            gen_shift_T2_0[shiftop]();
424
        }
529
        offset = load_reg(s, rm);
530
        gen_arm_shift_im(offset, shiftop, shift);
425 531
        if (!(insn & (1 << 23)))
426
            gen_op_subl_T1_T2();
532
            tcg_gen_sub_i32(cpu_T[1], cpu_T[1], offset);
427 533
        else
428
            gen_op_addl_T1_T2();
534
            tcg_gen_add_i32(cpu_T[1], cpu_T[1], offset);
535
        dead_tmp(offset);
429 536
    }
430 537
}
431 538

  
......
433 540
                                        int extra)
434 541
{
435 542
    int val, rm;
543
    TCGv offset;
436 544

  
437 545
    if (insn & (1 << 22)) {
438 546
        /* immediate */
......
447 555
        if (extra)
448 556
            gen_op_addl_T1_im(extra);
449 557
        rm = (insn) & 0xf;
450
        gen_movl_T2_reg(s, rm);
558
        offset = load_reg(s, rm);
451 559
        if (!(insn & (1 << 23)))
452
            gen_op_subl_T1_T2();
560
            tcg_gen_sub_i32(cpu_T[1], cpu_T[1], offset);
453 561
        else
454
            gen_op_addl_T1_T2();
562
            tcg_gen_add_i32(cpu_T[1], cpu_T[1], offset);
563
        dead_tmp(offset);
455 564
    }
456 565
}
457 566

  
......
979 1088
        case 3:
980 1089
            return 1;
981 1090
        }
982
        gen_op_movl_reg_TN[0][rd]();
1091
        gen_movl_reg_T0(s, rd);
983 1092
        break;
984 1093
    case 0x117: case 0x517: case 0x917: case 0xd17:	/* TEXTRC */
985 1094
        if ((insn & 0x000ff008) != 0x0003f000)
......
1531 1640
        gen_op_iwmmxt_movq_M0_wRn(wrd);
1532 1641
        switch ((insn >> 16) & 0xf) {
1533 1642
        case 0x0:					/* TMIA */
1534
            gen_op_movl_TN_reg[0][rd0]();
1535
            gen_op_movl_TN_reg[1][rd1]();
1643
            gen_movl_T0_reg(s, rd0);
1644
            gen_movl_T1_reg(s, rd1);
1536 1645
            gen_op_iwmmxt_muladdsl_M0_T0_T1();
1537 1646
            break;
1538 1647
        case 0x8:					/* TMIAPH */
1539
            gen_op_movl_TN_reg[0][rd0]();
1540
            gen_op_movl_TN_reg[1][rd1]();
1648
            gen_movl_T0_reg(s, rd0);
1649
            gen_movl_T1_reg(s, rd1);
1541 1650
            gen_op_iwmmxt_muladdsw_M0_T0_T1();
1542 1651
            break;
1543 1652
        case 0xc: case 0xd: case 0xe: case 0xf:		/* TMIAxy */
1544
            gen_op_movl_TN_reg[1][rd0]();
1653
            gen_movl_T1_reg(s, rd0);
1545 1654
            if (insn & (1 << 16))
1546 1655
                gen_op_shrl_T1_im(16);
1547 1656
            gen_op_movl_T0_T1();
1548
            gen_op_movl_TN_reg[1][rd1]();
1657
            gen_movl_T1_reg(s, rd1);
1549 1658
            if (insn & (1 << 17))
1550 1659
                gen_op_shrl_T1_im(16);
1551 1660
            gen_op_iwmmxt_muladdswl_M0_T0_T1();
......
1580 1689

  
1581 1690
        switch ((insn >> 16) & 0xf) {
1582 1691
        case 0x0:					/* MIA */
1583
            gen_op_movl_TN_reg[0][rd0]();
1584
            gen_op_movl_TN_reg[1][rd1]();
1692
            gen_movl_T0_reg(s, rd0);
1693
            gen_movl_T1_reg(s, rd1);
1585 1694
            gen_op_iwmmxt_muladdsl_M0_T0_T1();
1586 1695
            break;
1587 1696
        case 0x8:					/* MIAPH */
1588
            gen_op_movl_TN_reg[0][rd0]();
1589
            gen_op_movl_TN_reg[1][rd1]();
1697
            gen_movl_T0_reg(s, rd0);
1698
            gen_movl_T1_reg(s, rd1);
1590 1699
            gen_op_iwmmxt_muladdsw_M0_T0_T1();
1591 1700
            break;
1592 1701
        case 0xc:					/* MIABB */
1593 1702
        case 0xd:					/* MIABT */
1594 1703
        case 0xe:					/* MIATB */
1595 1704
        case 0xf:					/* MIATT */
1596
            gen_op_movl_TN_reg[1][rd0]();
1705
            gen_movl_T1_reg(s, rd0);
1597 1706
            if (insn & (1 << 16))
1598 1707
                gen_op_shrl_T1_im(16);
1599 1708
            gen_op_movl_T0_T1();
1600
            gen_op_movl_TN_reg[1][rd1]();
1709
            gen_movl_T1_reg(s, rd1);
1601 1710
            if (insn & (1 << 17))
1602 1711
                gen_op_shrl_T1_im(16);
1603 1712
            gen_op_iwmmxt_muladdswl_M0_T0_T1();
......
1621 1730

  
1622 1731
        if (insn & ARM_CP_RW_BIT) {			/* MRA */
1623 1732
            gen_op_iwmmxt_movl_T0_T1_wRn(acc);
1624
            gen_op_movl_reg_TN[0][rdlo]();
1733
            gen_movl_reg_T0(s, rdlo);
1625 1734
            gen_op_movl_T0_im((1 << (40 - 32)) - 1);
1626 1735
            gen_op_andl_T0_T1();
1627
            gen_op_movl_reg_TN[0][rdhi]();
1736
            gen_movl_reg_T0(s, rdhi);
1628 1737
        } else {					/* MAR */
1629
            gen_op_movl_TN_reg[0][rdlo]();
1630
            gen_op_movl_TN_reg[1][rdhi]();
1738
            gen_movl_T0_reg(s, rdlo);
1739
            gen_movl_T1_reg(s, rdhi);
1631 1740
            gen_op_iwmmxt_movl_wRn_T0_T1(acc);
1632 1741
        }
1633 1742
        return 0;
......
1650 1759
        if (!env->cp[cp].cp_read)
1651 1760
            return 1;
1652 1761
        gen_op_movl_T0_im((uint32_t) s->pc);
1653
        gen_op_movl_reg_TN[0][15]();
1762
        gen_set_pc_T0();
1654 1763
        gen_op_movl_T0_cp(insn);
1655 1764
        gen_movl_reg_T0(s, rd);
1656 1765
    } else {
1657 1766
        if (!env->cp[cp].cp_write)
1658 1767
            return 1;
1659 1768
        gen_op_movl_T0_im((uint32_t) s->pc);
1660
        gen_op_movl_reg_TN[0][15]();
1769
        gen_set_pc_T0();
1661 1770
        gen_movl_T0_reg(s, rd);
1662 1771
        gen_op_movl_cp_T0(insn);
1663 1772
    }
......
1713 1822
        || (insn & 0x0fff0fff) == 0x0e070f58) {
1714 1823
        /* Wait for interrupt.  */
1715 1824
        gen_op_movl_T0_im((long)s->pc);
1716
        gen_op_movl_reg_TN[0][15]();
1825
        gen_set_pc_T0();
1717 1826
        s->is_jmp = DISAS_WFI;
1718 1827
        return 0;
1719 1828
    }
......
1817 1926
                        if (offset)
1818 1927
                            gen_op_shrl_T1_im(offset);
1819 1928
                        if (insn & (1 << 23))
1820
                            gen_op_uxtb_T1();
1929
                            gen_uxtb(cpu_T[1]);
1821 1930
                        else
1822
                            gen_op_sxtb_T1();
1931
                            gen_sxtb(cpu_T[1]);
1823 1932
                        break;
1824 1933
                    case 1:
1825 1934
                        NEON_GET_REG(T1, rn, pass);
......
1827 1936
                            if (offset) {
1828 1937
                                gen_op_shrl_T1_im(16);
1829 1938
                            } else {
1830
                                gen_op_uxth_T1();
1939
                                gen_uxth(cpu_T[1]);
1831 1940
                            }
1832 1941
                        } else {
1833 1942
                            if (offset) {
1834 1943
                                gen_op_sarl_T1_im(16);
1835 1944
                            } else {
1836
                                gen_op_sxth_T1();
1945
                                gen_sxth(cpu_T[1]);
1837 1946
                            }
1838 1947
                        }
1839 1948
                        break;
......
2418 2527
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
2419 2528
        tcg_gen_goto_tb(n);
2420 2529
        gen_op_movl_T0_im(dest);
2421
        gen_op_movl_r15_T0();
2530
        gen_set_pc_T0();
2422 2531
        tcg_gen_exit_tb((long)tb + n);
2423 2532
    } else {
2424 2533
        gen_op_movl_T0_im(dest);
2425
        gen_op_movl_r15_T0();
2534
        gen_set_pc_T0();
2426 2535
        tcg_gen_exit_tb(0);
2427 2536
    }
2428 2537
}
......
2444 2553
static inline void gen_mulxy(int x, int y)
2445 2554
{
2446 2555
    if (x)
2447
        gen_op_sarl_T0_im(16);
2556
        tcg_gen_sari_i32(cpu_T[0], cpu_T[0], 16);
2448 2557
    else
2449
        gen_op_sxth_T0();
2558
        gen_sxth(cpu_T[0]);
2450 2559
    if (y)
2451 2560
        gen_op_sarl_T1_im(16);
2452 2561
    else
2453
        gen_op_sxth_T1();
2562
        gen_sxth(cpu_T[1]);
2454 2563
    gen_op_mul_T0_T1();
2455 2564
}
2456 2565

  
......
2501 2610
/* Generate an old-style exception return.  */
2502 2611
static void gen_exception_return(DisasContext *s)
2503 2612
{
2504
    gen_op_movl_reg_TN[0][15]();
2613
    gen_set_pc_T0();
2505 2614
    gen_op_movl_T0_spsr();
2506 2615
    gen_op_movl_cpsr_T0(0xffffffff);
2507 2616
    s->is_jmp = DISAS_UPDATE;
......
2512 2621
{
2513 2622
    gen_op_movl_cpsr_T0(0xffffffff);
2514 2623
    gen_op_movl_T0_T2();
2515
    gen_op_movl_reg_TN[0][15]();
2624
    gen_set_pc_T0();
2516 2625
    s->is_jmp = DISAS_UPDATE;
2517 2626
}
2518 2627

  
......
2529 2638
    switch (val) {
2530 2639
    case 3: /* wfi */
2531 2640
        gen_op_movl_T0_im((long)s->pc);
2532
        gen_op_movl_reg_TN[0][15]();
2641
        gen_set_pc_T0();
2533 2642
        s->is_jmp = DISAS_WFI;
2534 2643
        break;
2535 2644
    case 2: /* wfe */
......
3011 3120
        }
3012 3121
    }
3013 3122
    if (rm != 15) {
3014
        gen_movl_T1_reg(s, rn);
3123
        TCGv base;
3124

  
3125
        base = load_reg(s, rn);
3015 3126
        if (rm == 13) {
3016
            gen_op_addl_T1_im(stride);
3127
            tcg_gen_addi_i32(base, base, stride);
3017 3128
        } else {
3018
            gen_movl_T2_reg(s, rm);
3019
            gen_op_addl_T1_T2();
3129
            TCGv index;
3130
            index = load_reg(s, rm);
3131
            tcg_gen_add_i32(base, base, index);
3132
            dead_tmp(index);
3020 3133
        }
3021
        gen_movl_reg_T1(s, rn);
3134
        store_reg(s, rn, base);
3022 3135
    }
3023 3136
    return 0;
3024 3137
}
......
4626 4739
static void disas_arm_insn(CPUState * env, DisasContext *s)
4627 4740
{
4628 4741
    unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
4742
    TCGv tmp;
4629 4743

  
4630 4744
    insn = ldl_code(s->pc);
4631 4745
    s->pc += 4;
......
4936 5050
        case 7: /* bkpt */
4937 5051
            gen_set_condexec(s);
4938 5052
            gen_op_movl_T0_im((long)s->pc - 4);
4939
            gen_op_movl_reg_TN[0][15]();
5053
            gen_set_pc_T0();
4940 5054
            gen_op_bkpt();
4941 5055
            s->is_jmp = DISAS_JUMP;
4942 5056
            break;
......
4954 5068
                if (sh & 4)
4955 5069
                    gen_op_sarl_T1_im(16);
4956 5070
                else
4957
                    gen_op_sxth_T1();
5071
                    gen_sxth(cpu_T[1]);
4958 5072
                gen_op_imulw_T0_T1();
4959 5073
                if ((sh & 2) == 0) {
4960 5074
                    gen_movl_T1_reg(s, rn);
......
5001 5115
                val = (val >> shift) | (val << (32 - shift));
5002 5116
            gen_op_movl_T1_im(val);
5003 5117
            if (logic_cc && shift)
5004
                gen_op_mov_CF_T1();
5118
                gen_set_CF_bit31(cpu_T[1]);
5005 5119
        } else {
5006 5120
            /* register */
5007 5121
            rm = (insn) & 0xf;
......
5009 5123
            shiftop = (insn >> 5) & 3;
5010 5124
            if (!(insn & (1 << 4))) {
5011 5125
                shift = (insn >> 7) & 0x1f;
5012
                if (shift != 0) {
5013
                    if (logic_cc) {
5126
                if (logic_cc) {
5127
                    if (shift != 0) {
5014 5128
                        gen_shift_T1_im_cc[shiftop](shift);
5015
                    } else {
5016
                        gen_shift_T1_im[shiftop](shift);
5017
                    }
5018
                } else if (shiftop != 0) {
5019
                    if (logic_cc) {
5129
                    } else if (shiftop != 0) {
5020 5130
                        gen_shift_T1_0_cc[shiftop]();
5021
                    } else {
5022
                        gen_shift_T1_0[shiftop]();
5023 5131
                    }
5132
                } else {
5133
                    gen_arm_shift_im(cpu_T[1], shiftop, shift);
5024 5134
                }
5025 5135
            } else {
5026 5136
                rs = (insn >> 8) & 0xf;
......
5083 5193
            if (set_cc)
5084 5194
                gen_op_adcl_T0_T1_cc();
5085 5195
            else
5086
                gen_op_adcl_T0_T1();
5196
                gen_adc_T0_T1();
5087 5197
            gen_movl_reg_T0(s, rd);
5088 5198
            break;
5089 5199
        case 0x06:
......
5389 5499
                            gen_op_rorl_T1_im(shift * 8);
5390 5500
                        op1 = (insn >> 20) & 7;
5391 5501
                        switch (op1) {
5392
                        case 0: gen_op_sxtb16_T1(); break;
5393
                        case 2: gen_op_sxtb_T1();   break;
5394
                        case 3: gen_op_sxth_T1();   break;
5395
                        case 4: gen_op_uxtb16_T1(); break;
5396
                        case 6: gen_op_uxtb_T1();   break;
5397
                        case 7: gen_op_uxth_T1();   break;
5502
                        case 0: gen_sxtb16(cpu_T[1]); break;
5503
                        case 2: gen_sxtb(cpu_T[1]);   break;
5504
                        case 3: gen_sxth(cpu_T[1]);   break;
5505
                        case 4: gen_uxtb16(cpu_T[1]); break;
5506
                        case 6: gen_uxtb(cpu_T[1]);   break;
5507
                        case 7: gen_uxth(cpu_T[1]);   break;
5398 5508
                        default: goto illegal_op;
5399 5509
                        }
5400 5510
                        if (rn != 15) {
5401
                            gen_movl_T2_reg(s, rn);
5511
                            tmp = load_reg(s, rn);
5402 5512
                            if ((op1 & 3) == 0) {
5403
                                gen_op_add16_T1_T2();
5513
                                gen_add16(cpu_T[1], tmp);
5404 5514
                            } else {
5405
                                gen_op_addl_T1_T2();
5515
                                tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp);
5516
                                dead_tmp(tmp);
5406 5517
                            }
5407 5518
                        }
5408 5519
                        gen_movl_reg_T1(s, rd);
......
5667 5778
                            if (i == 15) {
5668 5779
                                /* special case: r15 = PC + 8 */
5669 5780
                                val = (long)s->pc + 4;
5670
                                gen_op_movl_TN_im[0](val);
5781
                                gen_op_movl_T0_im(val);
5671 5782
                            } else if (user) {
5672 5783
                                gen_op_movl_T0_user(i);
5673 5784
                            } else {
......
5723 5834
                val = (int32_t)s->pc;
5724 5835
                if (insn & (1 << 24)) {
5725 5836
                    gen_op_movl_T0_im(val);
5726
                    gen_op_movl_reg_TN[0][14]();
5837
                    gen_movl_reg_T0(s, 14);
5727 5838
                }
5728 5839
                offset = (((int32_t)insn << 8) >> 8);
5729 5840
                val += (offset << 2) + 4;
......
5740 5851
        case 0xf:
5741 5852
            /* swi */
5742 5853
            gen_op_movl_T0_im((long)s->pc);
5743
            gen_op_movl_reg_TN[0][15]();
5854
            gen_set_pc_T0();
5744 5855
            s->is_jmp = DISAS_SWI;
5745 5856
            break;
5746 5857
        default:
5747 5858
        illegal_op:
5748 5859
            gen_set_condexec(s);
5749 5860
            gen_op_movl_T0_im((long)s->pc - 4);
5750
            gen_op_movl_reg_TN[0][15]();
5861
            gen_set_pc_T0();
5751 5862
            gen_op_undef_insn();
5752 5863
            s->is_jmp = DISAS_JUMP;
5753 5864
            break;
......
5806 5917
        if (conds)
5807 5918
            gen_op_adcl_T0_T1_cc();
5808 5919
        else
5809
            gen_op_adcl_T0_T1();
5920
            gen_adc_T0_T1();
5810 5921
        break;
5811 5922
    case 11: /* sbc */
5812 5923
        if (conds)
......
5832 5943
    if (logic_cc) {
5833 5944
        gen_op_logic_T0_cc();
5834 5945
        if (shifter_out)
5835
            gen_op_mov_CF_T1();
5946
            gen_set_CF_bit31(cpu_T[1]);
5836 5947
    }
5837 5948
    return 0;
5838 5949
}
......
5843 5954
{
5844 5955
    uint32_t insn, imm, shift, offset, addr;
5845 5956
    uint32_t rd, rn, rm, rs;
5957
    TCGv tmp;
5846 5958
    int op;
5847 5959
    int shiftop;
5848 5960
    int conds;
......
5966 6078
                } else {
5967 6079
                    gen_movl_T1_reg(s, rn);
5968 6080
                }
5969
                gen_movl_T2_reg(s, rm);
5970
                gen_op_addl_T1_T2();
6081
                tmp = load_reg(s, rm);
6082
                tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp);
5971 6083
                if (insn & (1 << 4)) {
5972 6084
                    /* tbh */
5973
                    gen_op_addl_T1_T2();
6085
                    tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp);
6086
                    dead_tmp(tmp);
5974 6087
                    gen_ldst(lduw, s);
5975 6088
                } else { /* tbb */
6089
                    dead_tmp(tmp);
5976 6090
                    gen_ldst(ldub, s);
5977 6091
                }
5978 6092
                gen_op_jmp_T0_im(s->pc);
......
6126 6240
        shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6127 6241
        conds = (insn & (1 << 20)) != 0;
6128 6242
        logic_cc = (conds && thumb2_logic_op(op));
6129
        if (shift != 0) {
6130
            if (logic_cc) {
... This diff was truncated because it exceeds the maximum size that can be displayed.

Also available in: Unified diff